[Binary artifact — not recoverable as text. The span is a POSIX tar archive containing a single member, var/home/core/zuul-output/logs/kubelet.log.gz (a gzip-compressed kubelet log, owner core:core); everything after the tar headers is compressed binary data.]
G~p+b|?aQ9sX9~V3?NyjpQGR8'm?^^?B2PjJ|pK@q0L\Z$x )zvojrt22 ƭg'3 `R\X?>a6{\lU[wrUj7}>e4}>1V\qx5z}B5Cx7ϔF+PaA+pt8SghTY@W5q.U]7?{[~_?ow:7 FZVmM¯[ |?~R47oZZXV{=uumvyE_nW6pvyjTxGVf%OW6y[v$X yx>ZF_d#H q:V۠r"-3!ndԘ1dE6"U{6: sR7><x2B9tsVg X̠#+K*鴲WIZ ޟn#ئ'7:tzq2dVua;cg?xD{[gM)yPOFB8KFB$K3 h c!,.B]ObmWWiu9h;UsZ_4~$&Y*6;.Crda=2Abp.ifLb:9!T +qٗ, .}b^uw6;&X^SVe?NqPM蹲^OؠZ6 rwWUS?g`|)Cxi@rn>Rɒ3oɍpUc-g`]C݂bӉZYFRAgT0'?c{٧ۇ_O5"Ӕ"\O-`(X*~A9P6TR^ujGGk`tuhi$Hu}⺺s}=t 6á6҃[L/yL6DU`CL )@.2KəeNrt&Űy߶`BJ:aO#JJ#Ύ׆yJBPmByȆYg:e!1ܮ'J$ETJxЋ#B !U: Q[F"'%z=%Ge]Mʨ}hO6-B"`2n,SXt{I<9iϊ$G (6oj 笽wSrϵ9Ѣ$CtB9+I212x\Ja'fWKNnmMN)ؔ%z0hI<&3xIs-= |&%նfl5(?REBղ..yEeqb5dGx<"j|=$8ë~o<5vt&aIb1$<H<:CV*v52Y?!)ڔD A!$ 9e1hl5ra8H}ڭqǦZ[U;` $P⢀ N Lx磎V,0RR2G@Kl0+m| Vq !DCXZ9V0RVE2S|F1EtbljDiN#i%ɀ]ֆd@B jH\A "DhkbE;I$m%EKbQw8}yF|"ϝSn?|z? YYqUhΧƐ2eo.vWe3=qtxy*Bqr\xjhp B,Wׯ#_LWLCѣK?pt$gؼnG{V [jQ95pBrP$J+UdY / WGS| MP9](;$7J'Gg A^ w {wsֽF7̴Mon\PG;4y߭~r4O5PjZr˸ʤ*znJ"n^7+ͧΎ/ ޽P+6.JL8.Kn\;om TzAUh$ѽ͙[F& menݳ-K2#" d>*MFXf'UF$3{L`WGџ5,; iLv2ǭ6 kzIH;o3ٮQ/Gؾ:JW5K];c9i]bI9jq^Y-8L8B/ 5g't*uhpO<8\,A!&CIȣ5.%g< 1w @RҡnqY,$s`.Cv"`RdO0.Qk<H'U 6>Y~@7y(hcqr)o|Og?5?I$RpD.i;QE_b^ ˢ@|JWF:íg#I;mExg+1hfi'n?Y YBKV=oɾQZ̾!8lf1˥Yl]mUOҙ>iQC!2W\*Q"ud>]X/7j]z iٰvk{&gײui|>/u Fp z/jHNXQxIU&ܞg_5Bш*ۋz5ʋ" g/AkAȘL ֋ZHR).T>J1T(.׹EIǝ6fhdn9ICKzvs`dx a\غv']{77Aࣃ1Ap ԋ }/*RB F{{. }/QBIOrUH OAxWsr uٺ2xoUHvi%=;+Q([[mS+<$Eι,E|>JP/eA:cS5r ! Y-k9Ƥ})p(%:2c a6m'k5ݧJ=wZ_zWjoQ}*?h|{Y*WEJǡX $(Uѫ1!AL !$5ɭgg9> K,粒RE``Y%A'G\d8Mkd*ErYЎqbdl( hݶhcR(2-fh%N,g5r>ڮ>JB+VL+`<J!h gXȠJX2+&eDMLQOЀksĕGPI0 VpV[|QBVr 0 dZZq'm$ *RT/eTXr*U@]`L9&)V'R>5ذmOv?ײLD`Fȅ^\qa-FH,7Je#.̜a1NZm#Y3轩l(|%:(VJk<'Q9c)ʨ#j@yg #[NU8dS-K㲰F_]ȁGy=%Qr (ar.6yb+`I_BL$kb0Ḧ́t9H8!*_ p'^/ Sg5'Dܪ3ڰgL.@z2/Ef/"47 qFh׃Xd..Y9E`^np}t,[]iQ ji +!JX,Qgښq_Iij{ @jfjjgK#{-'99[%ŦdIJ-.) s ;Tyw`mIlmuڀ(!Ĝ2&S5}^| eO3Y0;NY h}Hvh0 he Ʀ4a4$A=<S' p͑m4&e-eQ .YTcӂl] Rb) *pTJfЖ6ULj[#H(VHn@7qcifή~\^!?l~$?oc[as} oG~̯˗Vښqmvԫ]x/.su"Bnm~>EpE_!y#k>5? z{m-_fax/!YlMhG1GSE_:7vA&lrh }ۊjպ3RV/E9qkϺ%$k@z'avgah7ޑ 9?h鷹tŧ6s32bIDenjm~fY>ʁ-F#y* ŘKN'T }p`J5:kQv<WקyBg nsc!+:QЫCgEOAm4*2F[X-@B`:e:h'U) HU)o )f=YO S֪$UbFwyzyq~~g^7LS/|`T rɰ6zK䒫M 1@s+羜ώ ޞc)-xrCY-iLp@']3 9{cCrL޹T,6(˚d1Aܖ `S> 7 CfPI־`|*o)gL5JdAYȄ[踤|^-!Jnjz:f<#_Q&+˄Rʲ' +DH$#dL:vD:vUR;V8Ru]S9t9Z sC0h fP0_b:}hơW:т <  "ηUҦU{\exbjo%~tř 8`:69hr{ Cyʦ`;{?ukYukyJ f*3idn't;vCz}yKcup[>`!Zx?u]B# QyAM]JtވNkv'X9p$WHkG$ob5$#E1T5-B^lGqh]xsԐ}SC}eqb,_ȳt(2o R M#P! .!!`Vɱ|==4=pɒ@T@ńZ hB52mo[21FWڣ֔WG]-F^%&e\&ds1Q@RgMKg{="^a#0oMs 6G5Vn$~=F2us #/A RN}wvՇTpx 8*lMx^^s'ppGphpG⍸ġgeK9Z)g]9*L>r$hBKīpL>j_8R|/4s<\TYkcEMm9SLc: * Nő-y((]qhVwpF"o9\6;(o90{oD#e,|[Ò%p5)v= GEC )jpþy3^vFlhc )h%o@-)d1eAtLrU0Q9)ݣmͣ@m 2u8B[X눛?Nˇ-煕2p-ږ]\]%ݿ}~Ǿw߳ϳﳑ6M?\q2{-!nra=cP=L ~_Glׇ;ݸ!Yҷ2Ke [=n~z/ٛ^ 3{7kafiRޤzbf줔 dZoF/|!28f\@6Gh,pcWU Tr'6>tU-!6Qh*&9+32Ml9FE))e3@v= 2 1Y%-*hW:44k !j3yekӋnl? =ܱt -jCN|h[NxYKG![kƏڎzڊWG!RMmőd3hCR } ȚO-&AOa/2>&`'^ C*ȵpj&cF\㢵A-`WT&>Yij#cƲoUzÉ1$l}c#kj O6 A5Rjtbus'squ}'3jz&,+g aɪ} WH:G*U;M( '9Mi,?K# [WXꁐ: h$> UL,&e4i}??GI'F2K=xSՠSȅa~>0^ꭳ\]顠ND^~юrjar{gVO[~SXU&aGjc~]EZή{3xgVD 6nͳ˰|˜wN `c&jEJfk.}yLlLݔw5c/I/h1S+KQwh#yd ^)L`M%)k+0Oü\|XO;[cm[&?Ȣr2J+[Jg\xꘫ*{R+zX3sP=(]z.Qt4ϭ2^g6J:W<`KqwvvkMU6#*JFBhQ V"*yFL,Z!+ZTbV\M Bg\@]D B\Rd7WPCtj7]7qfCПrVhm-g֘b||)bh!1N}@P[Sm-V\1Aʭ E*U+IsiVuIN-gٻ6r$U8ܴMC.3`0 >.Hr[ˎLrXTȮ*XJJiJ@'Oܣv.hVL5s"i4ԕ3ϲh812s KZ-1Bl:`މrV-嬕?Inb6a$di";aeZi QK4FV'0mR"[BO?2g/=A}̉D+0 VpVrĒs&R)jZKapAjP+Nɠ"U #^T%ZԚ /Ղ)`:QEq^5qkoٓ]ckۇCx+KdM(9&⊃=c":ɸQ*(t$1,I[Ev9¦ĒЋUcX]y6(VD$k<(cNbN>q!`tmB'% FW.5qxqu(QCvQ'G',џAjҴMQO e]2%d@2:0MA]!&у}<өLhxEy0'}T(sv9j@-Y L{kQg]7-u[ R VigW>SG}4P (mD@?w/ }`KG哴^~Ǯ|?rxܚyê ]_=p_٧?? 
V%(7lAۦ BxmAYeg ë׼ӌHN졤BBH>)&Kdy2Ɯ <*P]$U V` ;$!q)Cf)9ȉ)BdwW6Xen>JFE &a6KGW4xVjqnH)8MI>evãg^LgӠ("r-iӤÅmswv|DdпI>+ymV36h}Zc 3P˖[J\Jr{o\of<^qR %!\ý 4kҊ%qR(-ǾeS:P: V.6 \}XI L:9i)ҚL`!s}ҖBU5#.yYdFB|8a\Y!i-Srn0@`Jf#yfg&n|pj;UjJƀ}2&4. lrd7 bc5eЛ0=-O`J'c$c#j"o\־kwߦVquGr*FvIPhme\u}1s,?Tpf 14HE㔓MIcЙ.gO<Ѕ|W(D(אA$ )FQjBH;})fVJUAB CTKa#U!"'$i[Q2O4d֗$:=&u'`4~:p {}av 5B5C7wcjlf7NS-<9NqÙ#Ԗ[ţBfWh4B)&$K&xA$Օ4:εsd[O&0m"scT“Pj]Ff g}ay%'2|u$I)%β|$˒?5MHK+`dN424@U%]UZY,ۅ Sd$N1pqZ(\)Re z>C BF_+n/ipoWOkCL'vV,6ŊՑEL2ٲ2KGKco^/۷w\ɹhcQ FFlTYS6ސ'A@-T#5T W;#|=GD 'K[#c24 2тI!#:KoPjמC ,Dه(-UI&4:xa VY-E LY R]A%eA%TJ-x`L*%X:T:eQ gw'CN.kqkGpf.-8?ȏt%\';8)2K7'?w;8 N8rNip?OS,oz8HIqKy-! .[Zȏ>_'kf[K]>2FLdJ3̓ӒhޞϹ~ }4CZ;]V-|iV O_/_j8N+-/6GqV_ M#0n1~u⌖^è26MƘf{ŵF[;/.'g /ytn%Ho0r9is$[L9KVʞݐefyMBp0l`ŲW_ߟze:^W*!:.}HXu8їc sc~{é·B~\(Br O"Z?pq?_e*LB=`S2f28y\2 ,5(DW@z 6Jݲn- 銗u6/4!}:bV,z$H$c0^UB7:Kܟ'KmǺODq%N;?xCo4L&Pz8j84Dw'N9rL1ڨo9ydC a@Q*zm$B&,!a֙@gnY."CujNzeF_IU7G@D6^2(gE0I6FƝ$lavɁuChkrJ9fE,8hU&e yL6g &͵+3)ښ9[$^REV]u E oT\$MonYg|5?wOAχ7d71H0 )#Q,54Gch^l Yu5"K>!66Qˠ%!$9e9bfڅkYc -qǮZ[Wڪ][ 2@ЉIS& K!i0F\٪0+m| VqI2!C.Ɋ bAj@=W&1RVE2کͪYF}92',P4b5I#*kDiN#lK\鐍9$J9bl0ȘLkGVm],gLY28()p\LH!Z57J$KHF=YB]q>Cw!Y ;]rb==EŢOb3q]1Յ>N /%K 6~w_߹b^_FCЏme>PQjD|g/~lD+E?LNO3XY8߾h(psCLL]Kyt<đ>hR]1}7izw&-Hi6a0ϴ -Rbui ,a0 nT.]_˻]sA[(lVҰ#vWbkJ8 BDE^W,|<|g9Ӑ/ˉ :xɷ6 ƕ%[̯)/ιqô)jR҇_SU&ӏLl׏>n쮷qiu%{';yOdП˱S/W7w/Os4ٳݧAkm ҕ ]|u^+?\yty+Wxzow?\ _6b>};E%]9ZyU܆Gs+4bѹޛCBe PNdgIBѦ:g|7jzzYdeZ/eOq]Reƒ5 FrqzSw݅a5>1xeqZrlQ n7Gi !uP!p3k}=HJEY[tPP}D=)].J|'TÈ_żgW9 Ђ!qib::%42)Ț~uNpd6V3kgOí?-wP+߻wcs?NNLS`Nj!ўĵCu+pO 2W^4^nyAUP\%k&31_`XXɲS\qG 5޿<ֿВCv'&o a&i8mR)oq2xEIy'V';?߹f^7`xNqq=u 9%mV_9ό:"HLҌisLFr9"e $Mois4QEd>ihBon|[mvVu>{O&YЏ6XvP|ҁуÆcn0ߨ*sF!)A-8vj!?y-ۣJT#ejKLsENU@`d"(JEou }Yx3mSZMB$zd  $<%t.Op߻,6C ԯiD2B5ExD+Jf|a-~-v ޕ/(Gi MqA=Q'ŕB"*arʡ5.]IDSAX~1B,y44׳ 'RbE4I $ϲ@'GңF ,U!iPD-zXv€ SdIM :ȴ}3mJj`Ee=F΁z՟]R#gP.wvjy]:D$(,`2 bKwe72Ih%\$  F T/? v?;5څIr|Lir+NVۂ9%!=XM EjUst%>Nz]՝FDPXҗ x9P3QEIk2"d*V ' Gѩj2qdUj&FU'w{Ej&kgdF4WM1l"K(7Jye8RHKa^*fD&FO|DHVW`6}+BiKAJd(S.rFeS@lBG FԫG=X:cˑP]ЎjrvTn87>.iK g{0?.Z:eQRdJhf8lffx!Ws^3 J3pqk`K-WR o$7 b3旯[ }XAwT5i+:4Yns<<E*#DLu\(2( B憻-U ,0# xMEAOkqk^gVj*z³j}uMR?q"a(ݾr6+J,[۶yt5㻅ZESс&yLϳ>b̂5ٹ"yQUnQy^LBt 1X*~w7  !ҞD\$gxDڞ)uHR(~r]{SKlQŎdPzJFh7/-ѥs"?WHf&h4HD eSNu:Ӗc$&᩠ Oi<ꍝ(x-; L2=@qZz4YhrN0ee~| J~ vsTܾqsl5]v =Z&O泴#uRiBATpeA%fSRh`R-Et JۈkVWruB5"IHgଣ8qm"qcT‘RjU#goG֪ݍӼ-\~w#˹5S厸,kFRg̐GR=\0%^0Y i( y*Hf)ev+ė f74Mh{CNB3v :4kږTZ^A'焵&ɻDȂgᓑ@ 1RE 2d#*ML){2j涖G\M}y% ck} vU t]mx?R 2Irҥ9-h+D<[> T:B)} 1|y]ol=HhpUiK\jZ}wKt19N'.=^7L|^l8%7nߛt2M N.繢$9Fswk 5csA ]L0LfwZV)"fPkxP!ǂAV R6!H5I{Y6ijda,-n g庍T$a}f}zx5o~nS3}GH@Dp|X]HبTP,Cfi5\Xfl`;8˰ɥ97 &(ISb`"|L+0b7&È\ j76:Em0j[{o,x9h`%:|,>_!²D˓r} 9sev .!0V:J*=af)ݹ BmY#aA*V)#YM!М9DԺ(t2[xn0 MN~[юqLZMd4 1`={J"&Ã&:y$@b>Oef!SeA5^p gkDB>d_fݎiLuM3Ycuxi}G?3;K5%??Hf-5%aOpb_|S=>;N~77c'n+FlzpH5* C%vxIqAC; ׽:/cה{$䍣/棕Byt[\P WnV@{gy(O|;k6PXlOu}*.m"UL?MW""~_~}yś3N޼|w/s%"5F0OYSEcyӢUn֓_\d⒉E Gj%! ٿ;P_cϮW?z0^rnUGHĢ!v"$ÜuB:/X*CO%(atDJS!XC@!N H8+H6;\+,%o.e[:AIƉTZEs ̆bޥc=2g(5vtSZ~5% ǕH$GqfKI SW4"hrE<9Itd^81 Ry2!,iMY#M# qC 40b hf Jq&Τ@Gs4wYH|y!&;f%_;ǻ (3Vd6VD9cs!{xQ8E(,e(ͱ"}V0M+7j Mn'o#w޾fRJiXn\Z0x,o[.ld8]oUi> %.Hm+_O(Qh!ߡ%c=\>pQfM5&]leqleiM,V&7I#Hǧ t_*K+w[fk l* l+2p\57W4j Vj*? 
z*Kh W \ RGn]bh{?lqv U.+9&>d**::~8s  U/SUfN $V8r[2_ cd:o~f(Q1 vRw:K>^`=P`)g /?ZJu-Kٞ6ې՜7_a]+wz}^8 j ĥD{E)XϡEG cy^VyW5&s0YPmtⰸ̡F9WS˴Iag _8̙__¤!/Br1xQwxIϪoE)7]pu?([a\W8V|_i{3wO J"yC7K9J\:ރя̤#ˀ9VrbU2A?[26rpP?]unlEMOe"uÓ[bE("lӇ*5nٴJC`<6G?ʃr;h"x>"O n`w*-Px)HR#nyuMcS=I82OK:K0?& GՔ^d׷d1.e/-WMr |P26\KЦP.OI2q1&ړaP GILdd J釉qYCgߨr5+0K(fB8zqgMRf32-f b3e唃 3IiKR&S؎՗Iz->1`!¨x?L:d!^2e̪Mzb"@" %!2'A̅ƒ@o%r2KOLZΆs<!KEb:ڹ/GN|=zSYO!ZLS寯Κ{=e9cz79b?J2xT, -Q#@,M5rF2/HyЬ] lN\;G%xK݌zCQP<7F$T96XM@[Y/Kq!1ëή=[j,WpY+:f'f)N&m#\JVfH3uْ FPP#+HBTU%4ב}~dW%FyɬH1D?pqZB:% $u=#~Y[ϴig yɽ|xz,MAMe:I"eLbd%65sLD}v۾k0tY(Q2G0V`![*:k9zR 0E f'Cq)&q,jMDHtPi0B$L(h@BDgY!gkT;?C,dQtV%H:f0:xi VY 2BqHY RTc`};,9hg,1En` ,HC ZK+ʠt# R}tE(UlMIr||_|YK?/G}'@dzoW% % NJtqi_ ћ|ɱTkp{N2FLdqrtC֏&5Pޕ&ogsO&uxӻ_ӿˤ_]\.TN8N+-/6]_ 0lh#pMmuF5moaL{夐\䃗lIg$MNݙ ?ӳfn۹Ñ`8(KZ~=ت4_R ^Ūff&9e63X^& kD3XftǷm/W rUou:UURގu\l8Ұp=08G7lKjnS qJqg|9<ɱv9M%e*xzipt1^'{{TdY1TM+fAsg$$ߑ|O?*~|˿|iU ]Vz|^yR47mZZؤVG=MumyE]lLjznHM6AN~3#AmGsϟg(Ɣ\c:yZ~q/?ZDH{:V۠r"-3!ndԘ1ĢPsrTH*=z]RGzÆrjrkO@^@x2B9tʳ,̠#+2w6Vl:cr)>߇3&Ƨ ;JlΏ DƲmDz:/x!Tպv$|a/uJǻ%= -ާYzPohp;jm?}4I@} dW6pུ $ 0y -f6!fSBE>ذ=:(4 ̽I1x?~/܂)N22QyIw> W>ylXV@%h6J;H'HE\'<}Xվ[?n O[Ix^%>zhq>n|j?h`jp2|[|" T%WrD,Sjm4W$(ߎOl`7e<~ɓ+zrM1;men>97Q,,rpR!8\d3˜%&`ʾw?/)$užRIi1T0o]IM"0LpBgܲ]"""5U"0D䚄"WJ][B/"䆺)A`eSekY/xZ`5-Ge_u¾˛C1_Ni:ԆY%n1 ,}=pӚ}k#'<Цx?l=Sq)'ڴpR(ge0I6FRi+%'o7=&C`06e6ZeRvA$ `\KϹII[E){хBղ..ܩ.XwXY9I PEML;$ 2"RGShL2" J^l Y$ f> &CR)Kp$8s̢-kֺ^c ͑CڭjmݲV@:10E:ZiTH<{Iq" uWZUMqnay܂sљAs=X&l+8uq+!%pVYDĈ) P {d JKm&*t)f)vK#D.ܢә!U.$1=x` ߞ՝xrJ5秃]V.&Bmtx> ë8w?ZyJP'qR~Yғyk+V\t(v9((8Bx[}A(L:<"r@d}+eAqL`v"XeL2cŻ sw6jwavrFYXy\+=ը HGEn _(vsWwg\sYmMR}d|w;6z_GN^sɤ([/~$xw!I6= Ն:QbʛWBh~Th$ܕcr:gz `|вIe,j:šWp{1YgG K_*W]Dn-P4JI{fvP':S_i$DZzGWMvxtᗙjba/z}p݌|}ffjW?}(MOBj vFI4O/Be3-65e&ҜxNlH <5#'c$ EU`2(|:iyƥsGa=&}NHJ:ԭ2n KS%sp%cPZGʌ)J=q(Zf$ug=#!8V=xt娛.TS !3|[~W7_<5_ӹ$RpD.ـQE_1seJudHgul$ u'2}#rED-k @2xSpv {2Vz;H+E.O8q3>3R}sN{7|-fB99LI盍XgyaRm7qW`!$f/eEQ0cz9N;?..M)+@\uTF(*+UAWeKmwm_oVyԴA8!c"u] =xQ !tDL\I)F"2 d%_m]'-|g7VGaf^-\?li@##Җ[ lxfZ{_[%Gw u&|d,B00xn۔Zw֛2HCW FxW*u@p"Pڈ6Gв ;Yp"m jfcwp/xsG=#JWcE3 N @,6$Y&$pZxDnӑz̵VvcS'k 2Q5c*9 HNh-S*rٻ6$ lFC9mp{`܇A?eiR!);NCz=g`Y {~=]j+Dp}:)xߪ+=J 3Ktv+L8ߑ%тLm4x+}@֬3'VRv`;+:r R>V\RB4:VpOa:j\<༚JAVFRAZ miN/&5ڽع`w$KqR)[^yf+7 *1T[fsϩ4*ilQ+cդԦ„{cpg}M@o;޾y嵕ϦgoB\w/y!eFh< $Z(c4|Ў*gF$34rM%'Fׇ>gΊ xCq|xYUklnE2rܱ:~"Zxt3*)H"u.,X&Yǣڙg I\¤O33OŞzVRO<_X] d9#ك.Ip2!I,kfmC<0. P% #OՉZ/saA*.$0wj>s$8bY^"Pz鈶7 үPև hp@ch= ܈ KM8*p}&B9#~_zS IΚ+N,Qc*:cщXV$l4qJhiH&m2p5B)V)kx))dLY-G^GW=[EaSFs4Cz(!_4=V>WEdA!T\ԍO?VNW&]^ څ6 2o9J4O=M-eDہR lï&JWiS&B"$Q>imHR9^G%^K; QzPYJbB1}5/n 'Ng2'1mvů~>ۧ~Fٍ =q֛w"O1Ś? &ˈ3f,ggu阦3gKj+ERbv+iWt,jA[^oQ~PFQ=ejHR FUENπ"PnYxZJ .>2Q{\D{ZBTN*W&qO[B,c%a)yRxTh f9ELRMN+@75Nʉ4I*/MN:<#P@qF'~+#̵Y^shH{={xJ(iaɓ,#q#6 <*kVɎkƙ ' ASeIZ[U,ML6)iO8ֳlg^&5<|gb~ 6*`0aaz8c0/HlWdUIM[?;4ʅAt lȈT`VP2X]R£K\xfL,e ?Se%]oKXxR_5y~84\av!?ל4ͿZï$҅%l6n+ g&G>l{KDTӣYZMGW7SI^Ҹ|׀t"aŌ`b_zYI2*?כXXꟇ&}Y񄅉W $e`zD61Iel"s.E:kk*`$PO1&d潆L+MxnHGcp6=fm= VvimE/ؼ4gߘO$؛>'2}Uy.W3ǀ-M१YQZ+J(NC͜oLb:S#u;˝B`6l6gR3nnS}*.ƟͽY|Vd?E3JNW7nMVmEXOgN6Wn|ō%\ mv+oB 7KBFo}yj`*i|O fo|ͮ{OZ\/hM{˜ȑy(Սoh8a4\q z&a!o6bumm7>UiqI>fҗ=7?oYbyͲefģ)G }D&h5Y[on]Wqpa<:#P?y{Z͏vpJ#דBd§_Yv%tu|r4v7\K^op>(jUvQ~{1kf8|W2|gz7Nf{(޼wc=*2^>7l. 
nz;}^׋?zNդCt ++YW Ѷa=]#] ih0 `} ܌h|Y-K_B5R lѪXͯw-U1y$^mSV"C 2?`- buζ+uFri.Lӟo8)wH;d6`:c6 p2A鶛 l8CAR&9]!`k;CWWQBt(%JX͑eKuQ,VEZLL)4󊗁Y.sD[>jxnϷM86ӯK6xV\'#a _RB|Ӓ1-ӴV< {hvU1NaZ<ūgqQ+ ]{^&h/$8U<URtL*iiHTp@jo^ԻF&A[6EG*,m߀\/ )~M_[x[V_Ś 3A?|}Y|7c^ilu푪5iճmeKcSV{SERMy  !6++b4Lȕ";wDRtDZDFZNT KBtam+@)h'xtev43tp]+De Q.tuFteWO|jIOH?G}}U gGWV0?lJ :넧!V@#e'CD$ ~p"ʵT>d9OR#Sk[VyI[<>,\{Y`{Cj,JF5 f{U`.~)a9K:q-e3*,ET)LoV2Qy%+tvk9-))*Kc\QqM %K4 i_4 43if Ct]!\Zv螮ΐB6;tp-]+A.t([sՋЕ$ty8>N-x"r:CEvӯ=wN>2Fg@&AwuQ9SH/DB:W;t'^/̚>W,&sծhDUs-/lԒ+'Z2ÜL`18y ˥~2ϙ64zi˪=FvYp1[E?2fiuuY~sF:Z&*r);K`^'s1A9c]r0'3.6e+.6_DJ.؊TJxT%3dR wJ7@Q*dtS YJؼx>>(钩쌩pUgbMi} cMTji:DWذ坡+DX Qҕa!B/P9)Bv%m+8!B9ҕClu [HBW=]}1tzuIQ%vO]2j'Ɯ]Z"NɻJղ]z-mg:,)+aW^IriRV2u~SNeiQ34p MZJYiQr4㒚.f;V%坡+D+t QJZ!+the`DeOWgHWBIE kBFw"+DxOWgHWv0upO}nhl;]!JCz:CRBѥ+lvpmg֮0/~ Q~JKVqLt(-wϑRui+ڝ@:c]!cx8]0}OWgHW2t tlԢ/C//zv=unpىЊʶYWz=]?,&mY4? * \rg߾Xς?{WV]!-%$/ -:(] /m?ck+.d+$϶-I:$/==: ) M[>a?^?v(~4mm :] \Z~t5PF-t޴?~`ՀPj@I"]9e7rspϋ6vj[ܦpnOWQ߿k7V%b S~7+6Í|x2ܰ=[,Y0oNG+p8%Sb&)`m7<rsoNuy`T{4ͥvs*7A9ͩ}aȆʅ9ͩ7怤|@R p)>K Rl"՞T r<`OGJ+VCRWWCWn8u5 (qO>X`*΍$۳ qb{_ON6KWkjjqՁWիmb' Ѵ?FdmwM(_|Mҙ:3C6yW:9YNoD9{{֬ԛv;O'ˋ;0x'o6Mb )rY*ˉo7f&Zf~짿|)OM}< ;*j1S.vĽضf!Gjl޽0>Uw ^_X>>U {w̯eb>!<,jͧwo3S1@0f7hM;]4tu77A5hrʤYUc]SVWRՔjM8jAUCcy3tZ}:PJuUCٱ:,%:dՂ6'BIY+DZK!B +άckds-6hFk۫N=hStهl$Zt--SHXݹR>tVv6CI()5z-pO#Pd0vcF4CcvW|D\L5CёBE5䔔5g@#5"{}knuɺ(sm ׃ cd,TD2; 0; ϜK@cV9jz=$ m( Qfc(-8|;z]F 8z iIn볓Tگ8p*g^ ,srsX|؀OB(R>ޟ7'!1NTR9 S $]1ڪȪ{Qd.g@1ё}Z#vNR]I0Ǒ֑ ~B_oF΄|.`5 j}IX(0gk]6>P4 I#ڳˤjH 7Ҹ2=uj,Zad9Eʧ: VAfYt7H!Q!S({ ٥fݑ#6*ˌWhBv ,;s[rݫ N7h A^Aka24Ouh;o۴`g(*f DUXu>:_ Ţ˳եcMm̭>X%ODHY`(.t-a64GV15k.L7m\UA*c3$XҼ1 k9sBE(\؅ڑkj Rh (dJ)Fs'+UlCD&0VjNdd"LhHpu gI: d>b֨(ױV۔N iWP^q3j,7p6L&jlU %Ә;>J͐jPo\?1u2:0s@ rNw̥5g@x`1g#~ nQ?%0RHpPg UQ r ֑lC@?V!jo\P;S )ti7 vxGo֞%@Q 2#}@PS(HnS]TIr"YUDI)bu+էb94DRH LJDA e͜`d5>"2D"ӨۃB6p 9Twn-A L;  +^̈KUU+f"cE-2( / 3s?`rv\ee:;1Zp `cg{]`XCt`-$ >:%AuPiP\J7#XLl#IW:XF SQG a (hV(3{ByjN$\Pd"UB5 CPk4X yy`4/9&AN2ԭ-x $nGf6 *YȩՏoTC}^شXżs mljBZ!BTD4X2sx|urWWh1di*h MKе 2 XB;KRn ȋY T(ѣB- 1sPGmwu $$hLEQ{ X1z,2m!%TmIJ X:u )ŕP3XU30bmG39O Ww >M'iڋiqIr"Ղ ]@wH7םkFOʖ6\ݶ'|vR%;Y:Z53ε&P'SFCoׄ bL$zz4\}YeFĪyväDy آsE6G=Tʍڪ-`gr%5n 2TJ`QLYځ d\J{Hu Xoެ7a7Vqۅ+IkMkׄ:H' 9f?"/(V1Z)q˜Q -Fu(Fb>Aw7A lU?>bVRl7-V=96Ttr2vt׫՟&;N1{|g-X^gG/_j^(r|濎mb|ǽk=?;n>ٺ?ބ]]貭1ڸiz-=WW4GFpUD#'䆭O?]>.eVN76>I:m@"ɵSϾݳgi(E%a.ILK!F0!=?mOߥu+!⌋P]RjJw: $%sEeC0CZcUHt:ƃ *?S4XAxg`%+1XJ Vb`%+1XJ Vb`%+1XJ Vb`%+1XJ Vb`%+1XJ Vb`%+1XJ VYUn98|;op8Z@zN rl8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@O ZKf^'ù'Ѐ8=z'X8r8N ~Pr8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@ {: ~i`Էw؇o.o U J͏ {8X@v1 $N 9p9@׈H@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N q='[ݽ>~<[MG7W?~.ՋEY=] 萌K썵c\\rc\/ ĸK}3jwu{xCC\rdA-M6dBd䃅rG)g| V@hGya 4޿q> `C.C6N%G'Hv,+ȴjuPj}r bt!}HWp7C+u_:y}>] V)ҕc"Xb|:M vpkHCZV[=×ddD}%pz*+͋+Uړ+Rr*peGϮ\U treq=*Ki^!\YB'WY`}:,9c;\e))=_Co/Dwקj;qՁ]I܎&JJF]-ʞj硷sCtąim ؼߔ'Y`#OQ\NtP[ I)_!L3,?%kz2p5TQ׿#,%gzpũ2\eOF \P>\ ::+"JpN8ry*p*Kk*b={IGanK5o\'9 |QJC:NToGgǎc] p Sq%Z٫3Kr؇YGPog(B -'u_ǖ\ӱIu>070@3({IuyV(dNSb<-Ê~-|(>:/VBFwNw{MT{z} DQɩMB8 b*rb`BF/EGvR)VEZDL!4Y.sDp|Em ۨ% +'Z"Ü 5jəy=y~P"(Koqs!Z[F?LLsL9WoHmJ*[a+"9l- MzqQSȜi\Ĩ[$a%nQqڝ~Zn~4i|8~?|'ۙΔ }?!/;H(3xxYLR4f8'}A=wՃf_[ʹFbxO %SFS99aqY\R)/d̰D5u1׭!s!.BX<)L)s-cv%]Y1΍Ge +WWl*꫓/7OWZOg۟g>3$˪ɸfS,1"G&u>jB ouʳhUߎiSHH:x(Ӣ5q64Gޅ\#y!Fv<nW-,UonB,d|]3]~g7{>݇'ӫ\0vW]Kp T>o˗?sᘸJ)"ڤ4H : ƘF9ǜJP4RֵuΠg sTkdPܙ\_ mng%z ^ɳInz<ǩg5vaSk)*GvvGXsB0-=<$(.Ta*S}0R[#Eap"]b;]g@f +^ST\نx]j)D.hW#r sj+ɰwí$Zp*36pD M,8y_9"8Gsk!Y0$P!BJNWSdRńt=dCs𥇚kQՇ7OƩc~s\y>ek>aw3w +R iwJ@(Wf \f/I77sywk783u; 'Z>;?'~&C>Ux;!`du740J('\\),/1m16(NJz!~@R Wh{|oqcA0W0q%Gwn]"հ}7k 
./G*y"lt69{W]iӁϺ6?&N\ ƄyЧݝV.+?UL. ܋|߻-w _&Woq_n BZslI-451un,Ow 淘W?{MoD U2tN6W#9_{0ӑܰpT埘N.aYz1N9UCɗ7}OWamwXvB//Cyb'ξw4S#*Tu*] ˝лF%~wo>?}|ûow?uo8.cT]4 [=鿿e9kilo47b[oҮm ]\25vM İyH3?yf-Lfߺ־x}Mle#g{621D:2^$'d܎ rъEMLA+sLQmw6ct@tn-t''NX+#Nt$Yf Y 5WIY Opg6A  bFđ9%&,RD6-#vkGl?%XPٱ+jQ[Q`,HJl`3":+uAu$Ӝ{'8'&I@k;#akʨ':F[;!$eDgD<#S3^kO"'`굔'S⾻#6g,9n5]Nzbjqr 7Rm 7#جIv\mT׹GLC*'^*FV\qE}"~t߭&(- xT\`&^9Y$k:a3';8n rLQeDGQ&(g#-@U"| ǵXR=SQ+W))ۮ錧@=jD=2:pB"#F9ᔕ@\[S\>b7{m(R]t~8 4:_CTG uW^29U-3٠<>WY0l(qL|B:c !,<TVHa$hNٙsLv DGwih/wڧjf¸@ՖK:e{%0Hy#'h7NX #@:"NfUUve/'lQ/+.OG&Fמ}PlxfGbGQ&ObYN{˲d*cX\5V,ȑȳs֐Oϣ?mT0KR ?! m9pBQ4VƳ--'^ʖ/mˉe*x,HOwmmX~$}^4v؞}Y`1FɎ n,SR٩8vEX~y.RcQ5.F3);%QK*Ff!FiRfGV81 V.3#΄8r 26\lhخz_ء hβ{,c5N5xj 69="r.t%rILT(d,X$WYƳRNy(lL86IJ-7qtSǬؕM5ێ9vB*s4~C1ӜN˥o]cJTUr%XI\FT,TrhQ^oתWQΦ*DBZOF) Et΄2D+螳r0F{_7m^V>sl]_?.q0f~[2ΦmwPLPOtա^RTj}p z-hd̶q`Cc ƒJDž^5ms?hƎAյ…`ʋPdܳx#y !0uhNVJq|SdY), .Ksɮ%U% wڐh 6ہd ݦA{`q5n7ٮFNb >:y&)Gj07sw_>6 &DmQ7# |}o#VHIJ0Re2p1 8Bg.^-$n#ӁyU 8gRgԙ,mw_-,ZOqо<8YDsOLKԏP S 3.xkzKŹt|,֟}l9)+:~4Kso;r\ hLc5E84AhYnGv[l?hяWsP|h%"Y aN"Z%W%8a4ȣ?ԍ>9pM&R;<׏ nw,#bGNNpCTkErgx:vg,[9sJЎ~ˑf ڒgf"w)hU3GW$0fo_h6&˳vXI֥r(I նwP`E\z[D;ʈ?!J%.L\+36}?Hd24W5rYHctdc5&Ѕdu$I*DT8=B-?&C̔ufxӥx[i*нHw r|jX^@UeGK!4o;ycN6 yNV /79mL]zCqDO_z^hIw|? i×o+V3t_nz2Y/wTEG{DqWƂt Ur+_R7A*xd;*`IX24q h8C"G_V#@+4F:ߊ v !GR|" S`j[p մ@V5mv{w&)220Pl_*ˁBd,:y dDTxiO& G*j2qWNv Nn_w#མڛ Q:p!4W\XgD"d(pG.7N*G)'mQj"r VWrʈ)8l̠#K%49uQO=` ۯmюjrvT8G5%l&ͤ"@̎K7:LDqE;$t&gr6h fy2^&sqb02ؙ\L~&/~+?3M ]q8/7(eõhqG;~(~tJwi4-=ZȊNlR g}wSoftaCN JK"O[%NLI^LOsz-6̬s64 M;7{f0NngG/_{S/O+{钸 rsVPoGdG zf}kHWZncp‡~/щsԉ` `#Ӎ17YZ?VYWiC]v2K-xœ8zrh])=πHAt.jf# C::$lzw߃`K,c y1+OrY.kGN޷f]))bÊYHc_ D} ( }d6h~@BeX Y\ \6_2+٘Ph`R O*!28:)ᔉEFM6eu YkC S i gYS+"2ygG,O\ˌ·ոN2_' e"ر3kICU~V97R88Z+&8:sXp .Mb#E]4$4 z/Wdxj<dQ%3Il#@^|Je-w)E08q)L#Y!`왼jj۔Pǥfd po{u<krߣE,&;X;&}D8[fwt=[zx{7s+Y mZN%aп ^d^ n+Bٯ6ι]v~cŀ=/.pK ~~~ӏ8q8X vjHKOWr!ǟ7'ss}+'10kq"kݙtf K_fn7?7\YfsfݗO &DG M>I+Q5=Q5.Q5TpZUޓ3Ok.MJS4*8_C=~u2C/ut(T9G$)* AGtXC[W.-S}2ޔe( (ΩsG=kiqިgF=oD=s\**4t器i.*WLFY{F\#2x̍6R2`A-*ۀyO('N ' M&#RDg"Br$D** B,G I;pW OVI7l~u \m5'2 N >39\;2w&2ghh\|$MYIZpL8J͙0[r*v%H S#y2pmA!T)E1M#}蝒NʨGGxGdG_@+YJ{}*DlvmPﵠ}9܏5V5+j;׻֛RzJ|tDԢ*C^ G$ fG޽ụCr8\*bp1\)]_u="u`v .G6"j3~LwlO;2F9xDQV(>D(bJ+$frp% )D[]IF!R=SJk _:xXVA߃Z66XQl3(63͌b3(63͌b3(63͌bsӫԇ/o#|FrQ(dRX9+m@)2HuH+ A5RIn(zz0l3 2d]Q',M0  /64-SXl+ hEiJ)573n=/?*9KoGO}+k_Xrmq0F'` Ȋ!!C!yEPo0G؋GUp͎8\UɍŦ.3lɥŔ}4%b LqX *8Jy*i{}T^_q*3ϧo+W_2͏oI/5H6,)u* ТE8WʹEvtQ8̮g=ϯyJldZFEh%:>4j%'c"EaIyznN|8_syxB);ԺF4"b[K)u\IlYl>`1CC Ch4=/g{!GKc8n}aUw"[Ckޱ1A_J c%jbt"ahnDϗVՇ_MmnYTn>}Q'nBd"ȻDϔ$?{ =_Z.NLqT:q"jׅnV[T~SK^-ndUYkjG .kƗ˻7ׇO/r@"J&t>Aq=c!bf8 N\Ll_}͑{#lPdzv4YkVZ_+;]87a`!9@-8z'FLj_D73c܌WErI/qqq}Ք;\}Z\*~3Uֿ]ZĩWRUxU ZxնpyQ!&3b>q67i i/8UOIr%[RђG0inRN%u;eCfU!_YX9zc &pX"x+ 9_dlCUisC>&T=Ƿ?3uzSr_pSTc jb% Ej%ճu''i0-F9P@?PݔYv3w?8cb[KZeNRMPcWE Å2dZ~6&ޔd &8`+mi&Rt$Bf$H܄I"Q5 ![SCP,?ɪР)rgXt$b]MqOg# Gb8T窤֎}"L;'Ǒh0 FgK;[S23O(j{[Kₕ+:k!z jcr8γ:B2&WOQ[QS0bbXa8t63cX&X[Pk4Ϧv^M޻++ /ɚ]A)b2SDUD [qL~4رn&{*z~?|3'ZV2dzKXy#ҕ^!0[ۈU~>To4,k|vMS ujJk+iZJ5K4jw}byg`vc4 :Fn=&A?/š {zʰ< zV@m(JY^.HM^ĪG%sŧy&UIաz=YĎQA08ҀQxXc+"Cz26<1hUw2ٺ]kS@դ]S XhKգF&'+s qw2TSH& mBǠmÇI=օ/>O/E_Oï"DI[o {.ͳ`WR~mxF9M>sM]ܴ n0yn# Wm^lgJHnoHK\닫a!}WxXB,HC vX??WTO_v"8ӇGRbɅm&^0f̶ؚ4Z!Tn}\aF G{rH66D4 HL`!<=ݰbN=4{2{2sAslgkZs PB4:[1'4dmP獂> }`Q!]&% H,8/\ tLۙ -r=,1iH78Cuikc}»[٤هjM8 qLC1:M}T QKeNQk5BqKA2rUŴ"Ppْh{2%K R*iWv[pDpjܨ`f9[=kF2`|QB~ %:Ykl Ԣ -fc|M6;JhzԞc[b|LBдٮ1,:ϓ1 '!Qei)jTM$k(~(E'0e U͝.hwLq|5&3O$t+ O1:"E!xp!جQ\Xe@nz1MyWt? ONxZs;Na6˱B*I1ո"c'[NI(([X09g+w~˗2! F? 
d2U&R UtS mՀ Mbׂj)(\р:A1D I-dE㽵E5afyނYS;..ۦ 7k_:Dtd!XFOsdZ }oJULz|+Sb>J$pw82y.n.:\.%9cbV mXp@ʐ8D3^{xѴ3^& eh/(6 t5):,WG&RJIK)-Z)&HNh1%Pd(`D~cd! k#ˣ|Sw$"tc\Z;/?*c_臲-⥣vv|eGWƨ>g b` ~8кQ7nkfloY7`vHòlP2OMB0!H殍Qfzujp0Wȥq)3oT)49BreD֛{S`M8>X}bZ8QZ@J-"Ak.`6q%@hP+O~[J!sޓk}o> Z8+^ZMju v2Gp]n=7YɉV׾:;6_j$jܩE*A\D#Ьs6Y]&O $P¹CԨFS ơ̆4!B-RlE=ۨ!qi 3* -O!Lc滮..cvto~n,K۞;bDw9}pc*AbH>aG}W'XVy~$9Aąs>쇮,=iџn"xlV\ꪾjM_'y?u$7,P|:IR/ zJ{fyM_QX>~z'yz8o?pI#02U/̿1/gQ>&ϊJM_~B%I߽ٛ>/|s /g~~8/sn85ۣ|{Mٯ7͠T޼i6Mliהvz]\2E"C>,xd}P$4CUYxjܺQ?mM#ѧ͢GBK,"Zj'R@#%-a^B/*JCsd]V'D#}͆9aiڑE .9:A$1o0syd\3tg:{Ub2dpEfUs#33D.9/@8 JP\ B QW^k"උAN[ߧ@G>]A}6o< >_-b)LØT2*| Y~lT |-`C#DF  JRrFΆz{|3Wp~tc҈&Yv3uL ЏgN?)T <&3U6F)h&8e.x뜡G `T,jSbDʁ<^1vw3I9Inc2П|{=f_0d[&հV%EQ΀L&ɚѥTv?r '*Q)Q$gAO!ZGPoȱYT:B18U4~Ep \֙/e߿ᅵ>ľ_מe$;ӷAZ{OGh?oK?\#깢V"09L{O(㌁]L0l|scYcab#ʗfD1bLj$^)G%<5F-)qyNDJ^mY hJTp489BEdDHQ|{~|)H"54jh M#ZJtFɡgHӸ@+bZDWV+oݷ2Z2JI:ztPP-\sUh[*GOW]=C"LAZCW8 =vB莮!]\FAS]}].،MSVcVv42ϯ]'UH] _q,>MGh]%/UZ8ΓTgQKvlu]|8~μי;Dqۘ ^յ-{a]?sLaS.(QyDs*R*pJ ip*ljU#H'%wKYH,s+HsFVƆT1Ʀs8^R/=jfYo3adMDW\VY'Y0$L'PM|ZE]Yjo>T{*ôm ֞ WvC+>,&\ \bKNR eWAYpB!h#_Ƨu }Pjamr[b?~*7^g9gNΤb%fe-unD~o;dY pԆ?e?L{hݯvP󱗽W޽8y'3eUcxLI3 2-ĩƱ3k䨖B'~6_Nf}e#>+ΛfT2XJ3SM&wOTfl{zimKzv.N_ -)m]j4mh'FͶ}K} 4m%֘jV?(At34)spkrqtQ9ҕ*d!?)8ܿF/s ֟Nzܧ84c)n^)T'ި  r?^yW'_㝼=xd%GfӶ+@kýx6sEF0*>rsZ5p!&jib.*HkLDR:ztTR$HςB੏ܡ lT:Bpbp6h}.<[*}kC ]ҮRZOz(OYNƜ E'pq,@ʢ  -[YjBHΑ|OjZ`c)qbT2K6!Ifbp63z+ \XL2BQ YDžOʅJN ֋!5o;isOՃ|ˠ?~GH6 "d*&.lfl7%XXtڲ0k; vēF8WYkQbYa  .(&!N CFːr= &{"h/pD Iѩ|X f>ѿx10b1؋KB 3cu# RZ”T8t<x'x P"%jAh4%D*qw%L83.8yQRVIE>͌x0+1u}yєE|^qS'PEPϚ\OfݍK8X ^sLj-%[E Nnx-hE#K_9j2+_sm?&_'8zo28 CU2&R5z>q}=iH^#Dl{a/l&DhxD2Xes=,S^DȠ$ ɔ)& &Pov4R.7hdָ-h-- 1HJ;l'gHs;VZ6X|?mzq9ʖs[~{ 0S&m48w{.?φ_<8_a6`[uFLg^\Y^+ZVJTۿVͅ*9W<T(a%r|ՇFStI5tIWSV3{Yft2R\*$CC9Eoc!J(je<9$NQ#6i4{(CasA!G18[X@<}3# ۛW7d}x49fks0bŴzEgmAZMd`4\p\HeELMt$Ij9QCs7. \wVI ;8VLJFPJҠWIiT҆ 5"ZT(xvZ9s: ǥʺaj*E}>3"Ifp}4gNkfN"JF4}iE gKctJ$HfsGS*֐M{<Ƿl?LWCxӡmntՂo3ir5cy\<2qٸ6L?ʄ39R/eв:زs[/GSH:o\GB$ X-) %REEIuH^x@+dhu¥(w_h^;=`yΑ: ƈ@LF21 Kab609hgΎtFq|w*A+Ď4Gn>ovNc;&=vVmEv !@`uDP88!Rɗ(B 0u()BNTD OL3Eb"0nпB"zK4:אF J#`TP&@S RpO-.gqZ ʌcx5Ъ?slX_&sr2b7m'| ^Q5B4 #F^ÿn]8υ\E:,+ $]},xfBNԩybʋoN"P[, . YjI04Xfsu8''qڲ`a 2΅H©I@e9SAxFS9)Oa`cl6 .$ YA(۸G1"9v$3rF6B+"[G 1_b289TMA8Ql 6&̆ #-huqCCYOev1>J/n8 kB}rX % LHڅhp`2Qf$rtL: DLcuKbԚ@fZj8eeגP}3O/#`09RA09!$2bFXp\ 5PV&x$y|txui2;Iж* 0\K1xx|ڕB\S !0VFD.j[8 Sj48B,`͝4]˸̚DsspHgMxz36]C:A9<GZS 1PKwQÿ3&H3yI`*dSvMW{lT xzKq K7#AUQ~m`/QآBG x{2HB,7; YF.7o^eŜ]cɊ27FIBLa"]L7 8>Źd 2Ʌο?rk3V޽9fgS͙1`}ቡ,BjcCL9C+KDops&hM3TS_t5AyA9?'O2pVٔg{+^yoG .^b_x}-WžnNQ|1~%edAGijși(|2J /d8>"puі .k]m'F;E-wWY͎|%0|^_'o)I՝J5ۙnwh.q_+dckgE?.;ȍpFp𹛓kTJSq0RXy8mYl$e;p'b :/TzL49Kg`u0Ape qC 40b2CMP32),8m VQr<,3[>Cz Nh=<+C })ES7< lzضP{7(X19B, 9VAĒz!.B]MI%e@mAxS(֋)%J@:$ic)5nv)x1Bμ`᜖(qGz߼`+r]}ЧIö>? 
-2ٌ˅F,E.^3P<+\[92%he+v$LȜy#x[Ƭ@C87QEtc8 8, 'ǥ(T1HL451t8C"e`T<jS:n@)@a9[WĮICYX$H[n廇p|M>MZ [t8\a4KQ>3" ]W]en!%mMZF1;s2ƫغsbz}цNgm6ԉYv- lYTܺ}Iƛ;nB0 7>|44w|uCU#% ^G3֜Nt*K5vsyżOM_K+?A(>&:7<5FKĐSJM/c>׻@@[pyq  ߊ`*T DRiÁrM "+" .jF5w`\ԛ#AQ',Qa5uZmG!MD= 5uf.>^jB \5zިξv{՞i[1ޔt CryI)877T }{`'VD\[7{Nn\mըjx`*ckmIg3lK|F63ه-]ETL{e!Ne!1\9x6zv,'6G'^S~}525<;}RFbJI4W {2H´&zHuhFeݞү6٪rG፥udU򺍎Oo u5 m:.>둢#\/%X\~,̖׽ObX pOr(tQ g|x"K^s1^H;W V;r'uA.*TK`b&* *gZLjIm!"NFG>U!,V:v YܬGDRFkXp*D%1H*#p*;ݏrR ՝f6mo3.zz]_JwEQ_L?\I֙~L?ʅJT'Qg=4kLS8 kBcǬ)RaqVI^oB;d!jN;;$BtDK!v5ZWwyA4W 8?CLI6NHpHƹx"Z&TmT]&)/R4 :rB CQV*NmX]nr'`wCK7!5nЮ/L.%B _ťSg5F]OruGPuU *QP4C&ZaVӜ*DVx!b xK+v6sO0OC;g\1![Xt$pqENWN }BN$8ɚg$LBcŕT K綎J¥9yLv,܁vVN6S:}yh$2)9=Es1JciTeɜ1 uOAdھ.%I2YYΪ ruHbvdM䞅ZA7H' N#*I!!Zμb{,Mh$D2'!e5"PS>V= p!G1iC+lf31I3rZՠVX^ iNzY6hz+MT Lj(K-xyg,<1DHUrH'G1(jk(Q=F,5 ڠ&p! -/y sXŸkˁLX, ލV#BcЃ^ E-#O'64}P 64:"KA)`R9d)&4B/ɼsF'[0-7騶B ^E'͢l+Qm%r1t-(s16bբ~GlQⳎBR"}xQh&3jE$!)CR o8mHʀ*P+>F!sC, Lg2y!'Y Y\ Dc xǪ2DTf"xhAZC䬤;449eNc=Vaܸ]]a2rxW  q^ q ٲ'GZtd|kvB[ǔȼfQ*KOMI (iR'` h,6]LY IR"d$Z&$8Lp!3x[4kpbP;lt{+lߺ*r6߬XI~CM8 -z.!F̜p6#f):ܢ}P5ʣ]v%cqK? /@Hve!Dړ%mB Ci{SFS(E~\paG_*=ΠB@p.hZo5٣ٴ]O܁Yp…7,4ـՔB&JiKINbOeT3ib+x-g@֏P<GFIIё)$D{Jpw$BG L}vA[+F" UvK8kHr|d|6[.#p{7mY*߲)Zr]1Z/eY;V{o^Jm~<5AOJ{늷ƽA#RZ=f3_~d>[D;4v{N 9/}\0H2d.cH2d.#(#(g#H2d.#H2d.#H2d.#H2JH2d.#H2d.cM` 6]mW&5$osts̓ڜ\;˛tReʹYgefR{jK^xnBqr;oz=}&<==3\՟OU.%@5@жa3 hE0cb,`,Ix'eD/QԤxC2@3Vd] $U Vdt98GT"f.s=DФlVNOƢn~,'׳.[N-a݌gS=:nԷeؔh,z=zzA'7}whYݶxGà!ۣ@jiݬZs8vH-j٬WknM7=5>XC!osvt=Y=!Wt̹c);] .o -iyNZ s93jZ{J|!W"m><'g8`Ӷ2FAM(4CGKmZzxP lcj?mک~98g-(x#x+2(:-^z!h胰nȥf n7W@2: vQ˖yMӂWΦ?;?цf*E.%^IQLfCBrZG'mV0A Z!NZbx+!$Z(|R2hɭQG<Čm=BYŴpreN $˞E4B xøVD橢y \pUL18{aKm6Ri'IȦQ! @M!0rYp`ևТCfC.PT۲ ?Jx/nowUhP²4 1fA1"JZdRK%gF&NȯVm<ɯFOa0'&8WJI?}~uԺդX>}MqÌMGQ~9Mܫ&gvunڸy>^u\ :ߌ|uvM.5_~xܮ쎾q 'tk鼤 ۣK^0Cmt Ow&';<%atvot--@1] fw,/%.apxJAǎ]ttY>omY Cy9n0x鮎ܝґXwT!Wǿ+O%8?yؐ8]*%$Kr8ܰǯغrljta/Z7<֩)| #!ja!2Y o{&lSq .ancXpls4[k|&o ٗdXe6E֚iEWzey0XE{{b1pi*S^ (a`L1d)k/GH#%#y`ay)tc aWV`͠ 1U[GVЕ]'T 8'?2Jicˤ i[NSclc OWRi4Q./"ɤT3>Y!$SMİR9ڢO裍R ]Z;LZ)"2+ 2 H}JxEcb)cw}{YIp`HB"$0iGHVV%k 5ZM?YҍlݠE^! 3'o^Ut0c[FlWiamGyqYQ?Ki,WlmFi`Tu\OIu)\NK5 rgUYO(SЕ!T*33D~pW^u͏ׅT'\|cBL hPM"Di2mic ˉ8XfT YЈ, F0 ?)<,q0gLpGty+@Wrc hn9~R]surd%%2GG֦@8#$VWp& $:f@d' Z!t0ޕ$ٿRЗ]Rc7fh{0v !Hkt*eQɢH{ʀex"22BzR[@W _j˭aHv[[sKWt`n봚gSk~Ƭڇ.]E8[}hN=x8i^jzFNoTe렽)"{\hGRT ՜q[|ze.j[(8̡.kHBd/Zh#:~;l?(8e)d(?)&Co(Np}@eѳ 20g,>JegYyѤE B{tE=)8 ?f3.8iB\c?e>L /sfFU.p#CtP&2c~q87]S Y3*jr^ 04w4}F5Vi~u^^V迍̫=7|? bp[ns? _[v_cZj[\׌X Y k=Xސ&|ls4^\/:zrr0},k[V/uj7:)kБ&A= Ʊ}?L? &m"]lTXb 2 A#%ُ/xc۫|sƅ={/޼?wbx"Bo!gMOoZMK OiӷwSu]v.MxmY,qd=?hy2<d_Dh=h/Nm$611tLy筶AD$Zfbܞɨ1IcȊBmQE U{6z pk@DK!h}Jatڅʳ,fP[t:Y9ѝ G^"qͷY6vݼr#* eh̼6da;yd݉xbM &ej2%exU5V^wzbާZ@&#,phKr*ȼ1 !4Nds."&R.9\O YiJz@yx\H-ZKf-'Օf&˹.ƒH.i*+[QZu31dNZƴ6s6%*bVw ҇hHULjF˓Қ.2m^9KwR+u5@nl&1'ǜus⣤?RdQ|<Ҡ B=\2,P@`P; xM^͞ݍwC4?wlL8pZϝx.ا{6}?f QȃL i"N1{2)[ %l)B$"9! 
CFXXK4lye7/ݦwoﰺٙGz SC;VS̋W 6p\UwS'78Gޝ}c~8E{!g19t&W8㊭e^҉ŕ-xv'f"Em4m9nC!QL zTSP*Ʉk9'?O/8m I!\OpRNkKCYjhR)s]kv$[0fxPvKvUkT2,w)!EEf)9̉\ҹx"s?_FXw`fɇCڜדocFa޺R{Ea֙(eٻD,D 9weפVzyD7$**CRƎ]❉yI Wd]6 X383~;SuM3_70;o[Qw&ݵs<,&翚]9ѤCtB9+I212Ƹ–u3O]9.9a|)59$`S\`* "yL6gZzL 52v&J>RMPubRnYqʮޞl?1y;d#vt&q HET$wIF$V kh!+D-@m'dBR!h A\2ē$1,!8#f;qae\ jw6:6Em1j{`n$R⢀ N LQx磎V,0RR2ЯA\N0+m| Vq YQ!)(ll745̇Ubj486F} I`<Dl|l#C#ޘW:dc"wΦh=RS6 dE!D 0)ƙ֤ n1m$T p \p&-F͍$ɒ(B:3qSI=WCuv6J6NyŇ*JS+P_GfȑĄ7T pn=xF`2.p`~.o;e5L#[i>Ri h =ׇC)+%Lut|}|}iA$hL |Mu^9,DHuloTJP9*u5ʙ1 ,@U@e_I^o#V<Х t2tq -ކB`.rNgTɻ"ʬ&B'ϒnOq/| <( NIf6}W;,@: M, $ϼpqR e >y9nop^;9hOS3vQN=Z{{V@N-D:Hk͙0:zr 9"Gi=c( LQ'm"ָx(4$%N7)O9V81R(J eF$$mȾ$y&GU6~@'E㰡 Kp.;!k;>9DC=_F I(JzU3JXdxe3:x6WNw޾f6Ѱf[KE`\ ]r᷽}'F7.,rhR[r^w`訌P! BBηszScvJ_C"rT1+2d .AB霉e,85{S$s$kG7.syrn<OB׈Y+%Mns'=Ӛ %+ __mtom'/s7? 6NZ?E8[LLgSjz^ƾQ *nٝ厐Fcu=1򸚓?{Oo@0۹e6x(er\Dck ^Ԑ6Rirov& ,!'~#Y_z>Z*͙UdazYrby8M*-=YjP\6b.V9TN'^LZQ8YxhI]-^U6]JcM믰n7/[7 LZY %!2'A̅ƒ@o%r13Y튛[nLmFEȗme rN69 ; Yűn<#ٙx_bٖlYn۱$խf5YU|Uy_K"gu #qQB29ԏY*:h1yat,в0A䕳s9͖ͥMљބy8P _QP ʼ^% ; Uj{PRi>z+ +&XjU%wEz*ATWWoP]yVV2"uU p+uz^J}WWL{tUPFׄ*^~-ꊩ}`ɽz w5*uUx-ꊩe8wuU\RWJ]zel]wWx|>>3iB[_}ƫDR&-c6wb^R2cu w"h'(7Pr!h 8k$O_B@xROK/FRgo@h yjI"ڲP2>;ɡ8ហ3vc/zxEo6 :& ȚdaUP#Ġ=hDHQ g M$i)FݘWߴ1Z~y7_0rWš>ofbS+#`T{Ԋ'9(zwC˯dݺ:$i$oEYRQSB|ֹdU*"M :=EF 0J DVo (HәD`0Jv|$3rΎgSlVlHfadM=p9ɛ /.?k7o9?zsRt>՚*iGȘH1Xb xȢH$TVKY۠,H*%Pϫf4ClR iՖ%3(25&eF]'I o\R YIr:zeIc[%.Md>J֬9]iLH M|CfB7!EKlDgvgSR)(_& gT%F6Dj9XA;U͔͟^]Nj@ X,)2n&3@Ć20~='0e/c= ZO.koT73hePq̧ _ܺS $BJqY7`4ƷC oi3V |{oSEˆ1b3 |.) \JwNPug;tqz>l?܇s"v7Z@u3tlr&^^Y)%kA`QƸ ْ2Z l80#)X˴LNt+-)@J/iclL%dńEatd*YĮ]Y}cO㋲\^{Wu6EkYcW-,>O _E~ώJ( RêZ7jF94iƗI+=/+*{VbCO+ЧMX6Y4X(zpV8DQjhP8 "}PR# _&p/ZyfKtT.]mdqMʰfE:R8&[A6?" T6޶oGؠhYyG{K6>(DS!x#IUq8 8Ohv?º c` j^7DdFx$ʚnFLxBHի. ga}bIKLQ:#-s,tR^b)lpFDJYi{AV65Ӏ@P[(,AdXEJYC*e *0\φQuZuuݡfR5_yuz3}}4.,,Y9c!OpY\ 魻G ahOqi0ԻFN{=&?r.g8~~TCV3G ́ lTPRH-8:>+Ot dH2f!XHKg'Y=\y/:pR9F)')4H8^ ʵ''3?o?lCxu0 ['|qW0n[&7V K= o*q}@mh޾p}tM} c6]On>%8"r˓!7a>m瞴|i;Z(MG~ #׼pQk_t=kʮQOk},gwUx2|aqN։*|mkdP6[ j6"=z!Ha|LۯbmZ13}|`Q0%:ڣSי'DDHA&#uSi IU""uAK-9T׾}{ɺ=@ai~=Z{‚gm΋1 ,S9r D@U)S*{2ƒc (ɖw+oHQe$&c%3cA팜>Ԕf°&MbsH_^-=݅{Ң)'W!ŪmUqgh![9jDn<]$WXν4VNu"r *@Xdf@(vĒ6&hXP9XHJ1:tWa%յf쌜-zvU:ӅqƮv ^>.\RT"$.^MɟijO3d6$XJʈEjg$E=zU"úSU[] YJ蔔VUmj3&3NҌ2@\TDc9k4኉w쪵]Zۅ@V&P_bv9h_!)#cv+SҰ\ [Yg]S-HHܖeDLX,F5uyXF}1 bF?v҈H5"5^#'ҺT `Z`f*rB+p%ت!BB8oj1ak*U$L9r׌U2[Ҋe4ՙU]gvFKX/Np;M}{xw?+D-iє%EHX&6j@M,ls~jGPA-+`jzA=r6 \x\F1#p>g%yc4bɓvmrl TWxTDI.ڦ&gF+x<:קfyGl/@TxCv311Q{cbgKI9N &WJǕbD(C ǐ*Lcs6C!2|1\s` SR+rsܧf7ci}nl]|9}u_7a͊I4,gF"w^9M3;ų bxefIZB*hֽyfhp.{wG1Q9H _Vdwl#U#@#][s[n+~JHәi_:xK(#;Nf ʒ#2e3ζ(.,.E3xk~sd߫ 0) ~ &bCb ,-[hؒ>d~T/el +|;7d@kڇ Ûi Vڇ ۇ5*ۇ4:AP]E1gr,G˘!؈XZ>OC[h&ECjlZn=oe[#\zK$)\P.CFd"eEdV1*䀤@<<.~f1~2}Oi~?{u ٮϥxrMv2}%Ԣ3;m@4i??ǫ8Ǹ 3&`uqI3{RZEK)xD˦$ hfIg6bs))9@1b j`9qɪZ[<UԭLBi>5R51z1yE7r^649c1ZՎyW^Lc$w3Zobt8UջY4f.gKaK.AWY-;pt0$CphMkʣxz_5:EBƇy4\g8DmB#֠CxKb.\!{4jC0YD=VEOvnkTl0uK7KZN< ׃3k_ e0qr16iX HCǏk7gA7-w!vvфy(u7afߦZ|*xq'Lb~Y?xtR/CIuc?_WgIA[卭A5δdltу1Q%1-mɓ }ASb>}/ه6ݖЦ;di[(/f1mSIV!s.s|B!.Re`eSO3|̣٪zFiRk%+ÂѨx&Ŋ!T*H):M( 'c5O:IqF5Ǜz[lL0Ħ(J])phYyrBEX7= #Iew(?Nxop 0\71hՃE5ND^Ўr`GwFz'7m!YG|T0y'A/FYtIogyYf9ꢹX{Mi.i(6\Loڌ*eqon\y>y|nYws3]_q*ޫgpp4"t wkͫK-{99`9cL1mZ(zz9/[\IAnL^MY9?űZ̮Axm_2ܚDh9Q ۂ{ւC4Y@,ڎG(m_Ҽޥy:L>(,8`HRYdp.o#bqL->Qm.)+t68 3Xc[^]bǠK"*̬[dB(E>TĺS$֝/|O}ܬt|Ǜi$Sd֐ Ś"#zwGղ_5*рT4z-qXsf:N {Wcvh ja(7°k0l"{ F c ?Ajp˭us4LUl+b;B;e:}^ծ3L]:iŹz ڊ1r%D#*w6 uqEc6PUpUh]eO.xKʎCW P,!Rd7>Z΃FI\F~s3 r1xkm{岖 hi=e;em浯&_lsnYx|cviԫ~DmSZ-Tr4nlb0%LJ|($ uQ34gkp)QUX,c#J*l 9L_9cUHiaJ@ Ue*ʾdl*P :Y7r\<'1lj*\r4CD @&4F赊v#_Y,*wlQz߅=_YvjQЖ`lu++VDSk `4v`J5&At39'7=d_L 
,55dN(Q!8Pt5QqtQgEߍ'_dx?>RձOuIㄽ-B/R*aB6ZILܢӼJ1#"}ȋ>"G Q}ϬTb!1̪$DRfΚ5ƒK\Vu1W}Q<ǹHG79V::'x':A^5ÉdȰrW zln[9 Hc^)6΢B=dR%__ǒ> CW&#kq@&7:i9D?&t=ospX>UGo0ݷi7U:z MڋIáZDͺB,)eB0b8fQAء/?>_iu:>yˍ.,Ӌ6#@I..6-ЂWnsi:>lwN_&y;v_kmRln#z%H9<ҒO|3iv=D֥L+{˘X@ ChU蒊EGu,rcnpg?Yp( zm2UG'd J59Sb-(!W] X> KV']0tBHx99 H;|Y#Jn@i/Zm &8BW>Ahu"o2>$.z@C82Z` &sBn[G؉r\Ld!s6(@YP !m]4%З¥8[MP!Mb:"iĢMU9j\#0YBd;#nXV7SIc7WmgY?Nwýʔ׫,&S>ޣŜ .=S/wç],z)GNn5+n@hO!?6êQ@=܎&[WAk>}0=Zf^َVWXRZD6xICLf > L2sJdu`*It]Ȥ3r[q7Wy:#pu!Dmyfzb,ɲ!Uw]*h.\LkUժ]*.\a. 8 Z,VvGI iUn +հleMjCgۮS),ۭ(F(D %]/`9e +C$ Dԙd0g%Mh*Ov׼kY|Q ,=@Q$E') /I+"(T@_T7\MYR]8$Wxл'@3Ym*8' Kq44[+>dgRB;0(,J%muj7}A TAa?Ekl3Fo2d 8vC1o")e$b}[FڤUf/D&gH%E6T^S\'`.n/yp9 VU }^D7;{gn+}WWлrwZWr%٭~ag~ip;קƗNY(5˻Ѽ$s[aV]Գ꼁ꓔZSs IKvlF'c* If?hqI8O (a~A4>Ki߭)Ї֙>NC+D'rZ){vr{R#FkƘw҃!@(ǻWS'Y}tFչ)pFxz4FEΛƫŅW-0ݗ|>|m5^h<|~r}o4~ Bʵ#}uèðv-3mOu?-zv9ҮUG]Nrݨ֗sX$-ylxm2Mg4QrG&`_.S J__GySDѲKYvQUCu(/?25LO^(-_SSS9x~{NIBr?~K_o^ 3J@IQ(>~OCxˡ  q)oeCM'!^Jm08c|Q_kZwGHrCwn#*i(L୏dѺ0B'KY;VYsI&K]C f3HOlX ^z&ȱC h@Jhۣ]d(|@:& ^;mub:tz=$mh uX5d>O1,e3[d Lw('YIan\T a6%̐5$M i.xTt$m.6d@hdUdK` BmVKDE(:%l&xӖ?Ll.mꎻ=381BW^}rX9t)Ʀ(R 0hh*Bw醆)~G+͕쟍>ǭYlHNxwoGޑkyج"T d A׮nq( lRDr> eD°ׂ/M[vF'8=Ҩ& @4sU/~`E;O"r?{S{wx wߛs >CrHbkֺɼ'7 xv['fn\´Ն/&WhJHL h嶺!ux8n{K8̚A}C(2E6SCyaYirm1`sU(ѩn&,qCh Ge6R shA _jT8bCݮٽunP*j}q3ka3&DX{r3$cq9 bXJ¡e(g#OJ2Df&sFc*Snj93*Jo͔a)Mo}j.\]/=Kf MNB lʠeID=5x (ޕK›J碥bC'o\.U})e+R2*,adag;cSYh:z[++2޽}E@'fg߸NHˆYjg$ \v*+F1^ |Ře#K0Ge"hKR 0K_C ؜M'EFM30 _ ,%(V+r ܧư\Ote>7j ^}9=JfUPp?ΠGWyq@Gτ@<~ު~.\/U0DdJjB ^|1/\y&ޞ^#3LG[' o3CSz ZO?P%Ā d;夃meVB95l]6S5u,iUB- ::#~[etrѫ¹񙶋,hM.ƭsu&YOK:QJڨR ÒIR(/4gx-v¼ c,f8֜=ACǵ+ϓxc`|X\|ڧH*hc)N =+ob[J߈}>$LP$qDŐ- 2T9FZ SBH*#n+jcڄ1PŘXA-T-{){_t쌜݁,kOAc&^ݑ5kjlِMww;oˤc1NlQ,SsɸQg?х0i6F^`)]oIrWۜGC$^gv=CgOgIʶ{Ç(9"gDR%NUȍ4wDӤ:ش:u uS\#։bD*E1R!w8T.xK lF$㑅H>HԔ1тFQ4`p1"ت˩~ $:Ȥ=~si$|fKc e & ¼&BP~w MX5jgo7#əsPNeI:ML#0s<%p&fbeƦہ~I($3 |ˆG1c&L\y`0b{- ^aۂ&wx ڼ&zVZUsr0g+pZ j:Rjsfȵ1On&zGԢe6cw2%j!21du3:6)YjMp;O Ƥ&}[[UFC5MdK"kY"M` bRMLu"WsjEpxLu.T+ݼ# _<&Mx؇ B.@vM~njE'SAQ+aκ{Ö iWG TG& KO9:2Xp1ļAœJQ*fZ;29M=;>E_ef;gSdM.g2  kXf4YJ JKXFr .տVwJ¼gIՈB}={ 4'*/؇uU^߷YKzvKw=|aq4NZIsY1ٜ HAm6|rEhC[T{ PqZƩ6wO` @M+F5.6 ,jk=)A/%QI+PR2¤h m!M$!irfb]ӕ#g0›k"/*=0~wMWBh5VKy}/5B:=[lwձ\*4<`IaSWpC3gG.i#Ý6&Nή \`Q䞀QNVh퐓Vp\Ni4+gEC%DhADHD"}[)59aF* YcԔB~?%1GzxQSRPk- XDeckn$ J**&Esk?f@5¸bnP .@"(42cD Yl$DB,TT-7N:1}oЛz8@sr/9麂[c' BTkdFca2qSF'{iѦ;OD[~+1nOրkYj2 #C ^ BH`’s^C"gU'm m㐧`Xɩ_c,ԬG-fci7(y68m @sy$1 |Pr7ݝt4!JnX:K<ՙ]I|#hM2&V[x@U8DrE;$t&Gr60ZqW EH< i񭳯!#Xk̻ *PvϠ>CK?>: +O7N:E$iZ*W4q$7}0Y?6by sX(XXZe]ڃT,T}SOVܗg@T`¤>(DlGR3x`1 +0JE.@/ : R== *u8sdh2O`?]toyT#>IZo 9 t-8gD\^#5B\qW fhi!F VQaK ! Jƀv)yӺH.)U׆w5vX:HP?YbO P\3u RO82]j˱p Yo.4-L )Hf*m8.5QhX{B$k֫Zf-6S:3Pq.`,m8ۮ?.huJQ/ӥ̻I*zVtH JࡌLjx9b(`DH|zyb8 XJOl`q%'{CX' F[xS!NT"Rlkx``3wҒ~&>8 ޤ6V`}^MQ:,_pnx`#1>D歕tThfF#p;æO33O?z*C<yL<"}FPH  P`Y0hA[!OSoBqud8{pJW[e\Qnki ri;!Ė$u'I` aZTl] !)CzRԴe׳ '!} O\6ڜkwa8M @PʸIFMU`**52f^`>a|AB(FIESf0V/Y(8-,WP ӛi`.$HR$h?s=!H}>D__"\AK,`EyRE=swk}»eoDnzʷgs0&#Zx7XJt;xw(kM*WZjUjJM{*Q}KRc:v/t1}ՂVuu@. 
qwwO Sd_IEyѸp|W.hG+Wu2^j#uQrJ}*E~7qЪ"_UΰْuZԱC-"J|zI4@$ui֭p_+lH#3hZf[l2Ied<bjſԈZtm|hc}2c2j(}sW3fţy##:ĩAw* *ħʔU c\"QNq4,˨t}匓ӍF''#7NCWR \9(Q-V4"4=@& @eNyţ\ā.'U13wL8n ~Iӷp'_xvDr9 eQ8ꇽ b˾G"@?ӋYū2L>(Tფ(% ` $g$lz?"lMbZ^ͯg259lP6huVp~](٬g7`P٨x ߙގ^ 7?f'cFE ڮWfYẀxAh< L*,/E?L]FLq~~ ʹk<&˖[Zulb}bTހaW@~"3?3lנ лOȗ{`#  :u:98]Q6bj# ߋkU g310 f1grH8+W;TdlEǸo'qVuVUUݐUW~qZ ZmӳcـcuU\^Xtgb*m[5Y%҄oY H(򹖜N)bxЄWc*E@y*"s,G`u޹UӚQdرsM2芜!by&˴حg2lMbUz/Jj0dA(oA=w^u׌*4C]X!."ϗ&5x97Wj!,sնΛ Tfi#DuuØ3n<6ڕ׿٤2@O˻G&9#' Ljy.O@Jط;ǧgj.=D2̏W#X#]n< \=ZqqTRvZpW}#ԒAWBwV є:\L\goV~{R'9aJY]jp:ka68wx |-kMAP$+zȴuF0`8r@-a:QD/ B3+ XRr6pȕ\*QũPbtpROwpsz>F0+lP$#`[3D8*+ɹPˎu8* 'JsҮ|ήJ \vg{8W{I@bW=uHu cUbg?uUF%+I+`X lUbW碮$U󁫫]Hu*ԕ c &vٳy3}l,J\ʣ#KTWa3RWav1BE]%n1;tu$7/R]),b .ocU3"8-KX0|̄UiZ~Esj\Rsε!Y Gdz,)σp>%+S\_'it۫œzۅuL |H~kDzدR 9wEYb9A\=͹j) VyR>)|b 6(h*UO>die颪{U O2{32$cܗ_bL,|1*>+2#>B|(:[y'%*3㧈[,Ǚ(Yl6I} ')>!ouE'e-01)mɢԴu~$cL"\pF5F ZVWjXXoPBjG^o^kSGE%܄1W|>VWr!#xu\)G?gt=Wh5ΫuZ,#Ns) RpŨ&)m0ʢZILu %%c88/LPҠmPV# 貐Ai9IaؙIPSvҍyMh尧*xozDyV"GCfyDͬ!.m'=+] MqNIbXqcidyp4SW+3#62|$7Jvࣜs.``Q䞀QNi9iw`Ji+gEC%Dha!`3#*je` :aF*$؝6Pd4mYZcr!Mfgv*\D5UDGi–JL25i7 N HQ)@z"Ц\/R hhqу(`n`(O+J(Qoe( KP5&CZh'=-~+O?RQVFw'w<]A0(Rrk{AP"* SJP52ZyT(qL{u:1IUfۭ{%g/H7{`j UO^5ԁX#FHǐaB,/1Q*8RX aɹk0鼉HK:J['"tu=>km(|+|_XmFpoPpiYx G*<*NIpcr7tBvܲtx:ՙޙ&y$̑De:vaW~;.=euׯHYr_I9 KW:;f禣Q𝹛M`tqpПt'G~IʌD'{vLc*L)pG$7N5FDh/Sn]Hw`lp;.ֺ<_F. }4v˨wZ)|w<sިt(Z췵 i~miFwtܯeQ:~5ukJ2`k U c pLz#A2Z{1b1VZtY TBsߡ2,9?T|6diN%]n.C?@CNlnB ½w7/SKpӯ]k@e.0=Kӝ \b4Wgw+2XsG~!{|9x;W3 7jyRk6WmxDZO8c'{O>aܪ'*y .%:/1}ӷ ,gmep6qnz(g>l"$JQͭ$G~Zehc_ A{E'ݳݬMǸ48K@ L4eܑHjf,F cŌF(Dz,R1G EέRȰ, ƀ1Z)"VD h$-ZV؎n[]| +s8|j3֓KW1e;ҫog׍ܟѶfm5f`aYjRQ[>W\>+7۶s`-8gD\^#5BƌL"^ˈiDk45 2ln> wOFA~7ܩe3+!٬ziF3xVOfB6t 7'=N ~pqєɍK(\HI@h3D CthCu^L?Btf,BZjmNvޣhxuNۀRa4l7;i&k~Џ͜[{<|dN78b͗M֓6joizc/!3.=*\kW!Y˖6#BDyl{5u_/%Ó~^{TTi dBNIxHEmeQ*tZ <ǥ&*2-k?dF@K r B)b0z)#"b1hJCʤO&ZcG?nM鐎(vK5 UԬf>=sV%)~-r{_rҵelK̻I*eБQM=Z+9䂲R*(#S)$51t>jGi-^Z+N(`c)=Q)x Gc4XmANI8QSDTYWd =!"$))H0iȝb΂wD4Ƶ;Cz{5sVz H]68 \$rZl?O(WVЩuMpw۟L~c#s-up1M ϼa4* h.prؔ3M.QԪ4FD14(`FغiE$n\/;ǃ{5dzz tu\PXM,|g3HpyPKlAR=_*=sº -jd)h8$0-ŧ9-6x,~w-uTWd9۷,ga3 \2.MJ, Ƨ ٮZeO : VĂߥC߉,x;Y2J7H"=6;iq],%3፞EYGFͦut~tq ]MiaOVIáޕlB5[uZGVf$ K($Uz0EvԐLHF&m( }2(Kf7\uZxz!uE(9%ߧPbSlRtZkFdC$9s5~a;.ξtĈY Y]@:KfHt=u6 LQ_4'3::M1u0, ;4vb;5{>?;x6/tó~^\x8{`?觬 nR עjA G z¤ 1`3X:ev0 B<姘G 1$_|tb~K4O>.(t~I2WΧӮ^*gIen0 R`SuQB5Oz3J5W ҧOvi |\h5=e2~W0htyLNB4YUbH]im4 w`2AZJlUr1,3g?+ݛUκ3Gb%8:'O? HRW1Ru$)d;)\Ǖv6B7\D}ظ6!T͒ z~0Wͷ9Wj\9'ܧw<[OA7 "@G_ )y:irTƖ-3>rOg `8^WpkY.gQ5(,A̓<+ =.I̚NS{aăN'Kg6W#7blƳw=zLl4N)ٙdAK#y2@޵ƍ#o CNvv;/Y,p/ŸOmO[T_l'ٱAKUX_[ N(M>δE 2Y|IbtN)T%BC n&|+e*EȚ:lkB vbō0WLyऄgD8_6l竸ρpJ 竔mF8/ZoolfpxӏzB1b<;0%w>DJ;L~PFY 8K Hw\(ALR(}$ c-F) !'jm^Q0" 6a68#LY`3 ֨M`]{[s߻Y]mBg>ǰ͌Uy'<-!釱. 
*m(X磶<$/lRQKPȐB\jޒG\$E4%榺"lLxe#TJd 1A >ڐͤ :mcv(KF4Q/!Ș4BFȢH)C: %.&CdOq5yɇx0@-s0ƯP>tBfzK"$} IJGQE\ gw%2,]hUqI^[~ZQ?i+5fR%,1ߤy`J$r2~yxd4ÔIFcjHwk?6 s`bmՋ;A^8V^ۇЉFad{::@COoR<[prSמwY tgǥA32E6:]^C0mAZ{s_>/f~Y[%Z9c7xsMv#|tS<_n;M[(1ͮL<gI.E$?Y=&J8&#vM۰o#]e|_QDVgh1XW2h`6mw?߇^s~qa1/췱]}6ξJ?UxzWMg}W{Y-Upܸ>m5PzQ iGRJ'H4JI+EΓc>} y5b5dٝc U c)Sd?u`1#k& Cwʝ+F;g E[9@ƢJ)ѣ5,W|K Szw(YS>zD91#JKĀ:0.ֱf<0}Q]IPaNQhXY +1)X'Ǻ)I$-TtK̠*&.K *6JPjq56~T ؆]96K+j3qWT9st^yB"ui,fdWv 5vyXZ]}w3#Enxzfh)*߿\_2z$ & z]֑OIHZ+_vT0rm#]ɹ( < \Tlz(ʁ/lAJ\TkLmUf#cW[hB=½3nK/.+2μ:>?cin]8>>p<_~;ab'IXdL`(#VQe2,0s٩x-0)cjk셬QiUͦ.Yr%mp4# ЖDE$o"_s1b]mld'{gbPIATfuTl d:Z*-i+ƺ7CY^tT*R>Yl@,;BĹN-ƱXfc'h_"d'x'V\>'XALQ"3R)V#5٫mK!ɀX:;Pڙc@tJbOZ:uFo3q{D#eV1:]bӴ^}W/ۣ)IJXDv >@l,L &8VcG{(e[{(w{0aۂkiyA'yo4Y,?cUŪ>IpSe?j4~.͗wB-?.;T[TЁ6 Ӟ};qY΍̌̌"Q؀DE 4d]>a#K0Ge"hKi\)KPK ؝M'EFM30 _ ,%([]ZVvyLHּ89`ks^?}>|ynš~kY]vGzv\1^}"jW*U}6JU\υWJnJ,%J1*Jxj?TT Sڨ|*%UfR]D.6I)OSCTF;~DʧGa?h4B!Rph-ia93ҭ0ed|A:L0 ˧nTӋ>>}w8Zn1\UE& ^جeD J>]tº`^D2 2B%hisB ,検%%Ary G0 <$)"զj^|VWZ1PTve}>w`PXd7R]J%muj7=^ LITxtŊIXOD V7M\]pC1o")UDyRKގU)_,L*y J0Fmܕګb 떠ʓ*c定t5rCz_:Vg!.f^lµqٿwyp0z%iEf^~QXd~i;׫WABĺx9?Ϋ,of8!e PRH-8x:N:n}n󿷂* IV? Ky]\pOo_Y{[G-.R7۾iȔg{CL?|Ofg7"kz+P'Fkxyt7@9ݸ}s牣aϦxճu%CTX?h}u`v0,U?E9;Zƫ-N\^E^v1- B d`K`nod5 { \,Yi3-N{=X{⧋O^^9?_p^VC:h}˺H >==O֗KY5@NW]ˏ//&_eWdGu-,o2>:=?YEYZߢ__J;G?W8_N숕Ol$/?T}ן߾[W_<'~/j@EǣP|^j4iZ{xHMzi[>@.Ot{՛ڑa|ЯṿPOuk:#ɽH~p0>Dˆ;,e{Qd%lu/#}`Ƹt1AФc9|MH mO L&tjl_Nk:;,l,ѠPrוp꡷D7aՔe4Y{{YRo=~oEOa_d켰]*tV 陶<^qG4$G?Y MlbԪBGC1duI&0gYb2H95dlS$0N9"J W(1;ʚBQ(kMU R5IĹ&M"~>5P%ķ&chؽG7P [<q4cx6 кCM( v{c AݑAE+Բ2yO#98/dGzܒ,86)3*%2N˜w[ܒ#;`@¯xwSqAbݍca\wul~zO4xmCw+UUdp\+8g p Ƈ@:w}\6֘2쬅7;؞$KX;T1A\1Q1d 22*9x蔄9S1RimB(bLthZ4&{_6p [S`9}!"ӯKk3Uu;{Wֱ]={mEoP/OE0ҕ|#'bH"yȳffk #<p|s_/b3W.[lץukKkP;Y6j|1ntةyU9!S7y1~b)mR;d8sfBo+UJf YY][upf-)9Lp>zzU۲?9 Khada$'qEOX hxlY%W"-U$"zqr9!먷Yˡ.*/8.b09vNb- [khAt5] ].@kJGztʂ*xg8.\GK@HW/˽DWb pY ] JtJDW-\38Ψ^]w8^٤"]^ċkߌeCor{X/~-w)^ώַ??p D4ʮd֙v+ۓC_+8,e{{gn nԽꞸy'@߁d8ޡ;9e~9P^m9띅 E ۣio̮'lߞ\_\ZQgh 7xWY)^ V̂hz ѥ@ká@H/m:+] CWnЕ aHW/2,`/7ڧ^x(t"J"E 8bjŨ+U:1YEEXbjua o;HW/| _0GWli9cW]hzjtGztzǽ >k:x]5橧Z>t**_Drw߈C|t<]%? +U%yWknh38P?՟=>}'>[]7{}'Gڦj7*@WHWozwv9nEx񍉯DHUr[x?y sKh;t(m6DtAtF|t5z] ]-t^ ] QK+#'+ekBW@+J'GrtاV=hv*As= X,:w<'`)ԲzgFձe]؜ :cNUgW  0j7l{7*MO7w@ k0Xg-Ԓ8YM0j14=.}qC;HCaL$`CWo h<]Gz t'M|m\t۹.*#J5'7|{(1faO(z*E.o7|~Y?'wg?/QۀWZ??nx#g?^utA%^oǯtvvY-D}Q4k}}x]%e{aދÿE^V8^"&nqV9x󱟿q?]z{}x'oS}rS3BcKӛogwuXS_HnQ~b~P-0P7=W͟3S~1?}@#~!Nߡw9=1:s;7U[rY^`'mh-Y=l]|Ug&7x}L %cwwu?_@xM6 Vߦͯ:\׏p%yc*-[ɚ"{>;JƱSk輽\9) Ʀj*TMr!\ml;J҃> ڎTeB:2쇅KZ7amn` .MIeN hE#ω6ZChbUZJ",Q5t^)C~-6*ˮ\߽j*zw<& vg()ׂk$U'ZpO#zΌfzB4CcvWn8{PtBR?{D4!E^7cѭNj@@fk Je ưXF4ي06Yʹ0=>E c̥ѫ{ hIG+oMl}GנQ"Z7F偤.]Ԯ/Ri/!B8FI{@֚,99s-1~h'! yI4zU`ZmM%#iH%%QEkM$וrJ VΒ9(M!vƒbSWRLHHI?EŤ) klP7'Xs!Q[SY BH]Y5@T5U&?'5/.R:k̈́: znJ !(2>I0k͒nB,AF=ԆR3um4!kR@x&H+)щWS VBFn1xW   oO#4:68ìaD%InA共^uT%nA2( 3SXB}V_Xtĵd B zCs¤ݣk+3\4b`)c$XҼ1 k9}BEۂ(DjGU((IU@Hɖ2 RQO-`tWpCZD* +5GbX`6/ NW `AV JrHPJ⯒2+$_Xnl6!L%UܴB Av);B߁ 7CAwVrEp2nPv'ducHse  EdBE@څ|E\PFt[s : }- zFcbV3lHHԯdI>0!5P vN YW@H2|{0HVml!  lb}FФi!}p0(crnv[qi1bEo8*-28A%9;L<*gj k阩Vtףǽ}H6=*X = o byPT8xiLPTd)Jt%cbZU*d`1 vLܧwoQELr!B2:Xo B&Ȉh"A]RHYtŬ>&`lyɨ ^4ǎ=swa”A~,Z5K q[4G+ڙK2fhNJ@A !+P,vmQݬ%$˽eAKgqR3I:!‰!peR?\s4ݳSX绍A*VXC ƛR@hS)9* `--,BN'XD10\ ֋ʁXX"C@w%Xکɠxד(i(>&#)-cx OjiTꬋX+фK"]6X=|k#!.px׋t\ f 4`˔|D]!މhDVEeTfRM7~:Mn # n,㑥6$rp*xjۺ.9={~xxĤR@Йd;ZUICk7|$WV%7_V,Ŭ}J9})|}a>[EfrM_[]PwaT5koִ"KyfZnmc1 P5zb x/?N?ΟͿ`GkUυos12ůz%p0LC|A.YwԹ.Sm>,nd6\Qmyxexml:MM;j/$dCF Sp&?KEoW -?|~(H)p W{芄+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBpQG+\ȗe/,_ȗ W/pŔ~;$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBpW'@\N{Ci=xHi4^" *p+$\! 
WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBp%}"\@p Hd !% WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\! WHBp+$\!S՗ Id0hv\|~3zsC0,y _>vbhP}⇩K? ĵ70Ȭ  *vpW/dNRfUo \@ZԡH Gzp @` Hk9t"[6 ˁ+X3kg0J@ZÕ63"k\emv },8UR3*nBȐ67p \C,%Wz{D2C$%Ϝ0i+aR2}XpeWCO[V*ɒՀKu\l)KU.Vo۶pT/tYDAP!)޿{m1W#gZ0HZBnv\Lg% Lgq Lgi=tRZ0apM* cUfq \| +9tRRpJ\_#=+rʃw"\@WJƍKa/j7~iN@$AGFt ~͆IB8 >*>+BTAH[~Շoi2aq>L|)|,i^~5A15haḨbQE9er]g1к)gi>9)ſЂM3T}kY'儮ou/'!;mQoKJjj/IK UzムUl2Q ۲b NfQ[((?^փl4xX)8RdY k7s6piU%)&t^20H\ 5Bp>b߬G6)mEJι/.Ve/U1}ˌ葋 ו/.vVFR"]l:ڥ-f?Z<i|WO\;ؾoAmnsbgq%g"2VzJ> qs*wclb<~ཪ2+DnFMa}ڝ%ߛ ƣؽDIT.2)%%hjpQJhjF |bLR+e"Nn'RchV~-| J,vyOJNW%7*V`V8Y˒>`w͇h2OoFnecr\y>fo~:MҼYS=7>`z3!`$\ͰxũDɏXߚl ry6f ?8.8.`r#e)- rŽч|u:ˣzxvƶ\>[kIN|ǺB`|!ԁhnWv/7^B2$kP̛}).3~ >5 Ҽ wߖ5(4gJJURSCq}T* z4h]7mu4;٘QmLJo`r Q>yj1?]~z0A{ghRGN۾vӷ[b2 >oݹ4N&Ew$5l+[1M >A^̫t2\utst68CZU#wպFrikH9Ҟ\85{k:ӪȰ1GȭUތScb9?.~^|~5ߏ0/_Oiy~>%P)HUW l "TFTmh>.&_$O~xO?߾:w? e>y+X2O",J{^0_T?߿vGU܈Ti~zmMQW%^b! z#fjPdm4[; _]&#'{>Xb3xYE0ywbPȵ/*)jbd;l#}` \fk=u>*sD:t-w|em #O{}q ke$6ۃ I:RYf,PA$. |y#콧S|OmRn߇:Ӊ+qr(yeJLJ,e$:{Isر>2՘X)+'''QR,)P ^so"^zO hub]Ѽ 6J39ߟ$r8 )0':٥tUt,Ƹ݃iljg 3p5h~Z,&͂Tak ǻxUЊ{c9rIuaD%j q4D'-/xk"&D"Z0ӈzhr}Ӓ^wiЊ0]c>9Q**.NӤ{y]귡^_w };6''Ӌ"~Sr< Kߒ6_kWc^Ã՛nP0Kzph&+$ª3Jx FY__+.,SmqQmWFL[+ԖQ#eIL&r Li@?eGz广`;'ɊRH 6!LJiEe Dsf3WC}vnemsʸ"*yU-R(e!8a7VIiᩨteeJ4a>NCg:%3xCmJ } {KHU$F+ʫH]Hq`)YױĹgV>XZu ̜nCqK5L &)MT2有+M`"{T5r6QCUm>kD[|T lH2/i_L0I@lyTy)C3qC>魜Yfwّ:qVsLsҦF˴"2䃑1`:VֳR4U%NQQ`pUR]#cg܍*aagX,c!G,|R,\K6_dxpHÜRݝQ_a>ww~BxTl'( 6q$BX]/e 868/l-~ˤLʊEUIEeO)2SSUTכ!2be[YI䳵ɪӊgXb(Z'Vc|j@U F B2NFNE!e$Sb."ZXc7#~&bbnjmXkAkvNj̠ ImxDf,@ eOQmil!רpd+:BkiTV.jC*:Fu7ևȹ_F}1\hfF4/hiЈF6X%KyklET sB cXڶX95bdQgB a6H U"[ҊeԳk9[DYC:l%Ŧaaʥ.BG'rFx;wHݧu(ڡŬk{LPHR`F)e5"`Ji\)WxBilFe2E:y =jk9}R9^>~aͫ1<ϭ+7I|SkVL/O׫5&g񬂘> ^5x[:B_qbEgKG޹Q.dʢF(FBx~P-i0 2,-QbKV(}+dP N[ Jn񾯄'&g vK!fkS=x`_4κETcBimiMib|浛『1vt[\T!U9N|Y]TM蹠~Ѽs+~Kq-L?>` v0ÿ 懑#aZ_jeMF 2#)w6Y rp79:}ynͼ{%7s7_"_-˟Hu zݰvrjQozh>ŕz:{U;\}}]rz~7xTXx҄WN#iu^I%ho܍=OoB[.Ca[.{()24rl9|l[͖ UA@9'!dlYJI|FBL(XĔ"L "IZPڢe4[Ed eUc[9r˃*;]mnĽq4VΕroWk>Mz~TJV:&Ez4AY Y,PDxrxcM{\g2x(ơv8yɽ7,Y^Z6D"s: Ɯ40ttXtYb0 kn-C(׻oަJljr9Q1N ѓ(a,ӥ0&*=rJJ>Zߓn|g? l|/*t\Cfݸ~(E˶TPԫ]Ƒns};Wc7w.橋E/moys\Gny~{_1'MU㥚4ǚ=$;Y! |0EEQ$nM3.Yay9̏Щo_}Et]mM7'oJOQ_:^o1 3Ӣ7 {9_r} GGw~hש1Y!Ƽ7DT;.3@~]~sѿ|sK#V/o~G 1z5.i*XL_bg+bXK%"kG|;z͟}$i͊bq$/Fͽ`+|RHMa%kDm!im)g[Oy[ZխƳyPmvV[O{ ΠW:;1*h]#v?CxͯZN+hit>:*:[tPlzY䕯1 )dGymfZ*#IC j7LhT$\,*?V極/9BXCw74!yE^u! uЙ0d.vsuM1Od_[{X |&4/˝?*E:XnR/4؝O'>۲^A~] PHdETu\@/,*S"<d0Qz,> RE/mM]t~M㶱u-8 xiKY(LTS5 _BN-PER >[pڿtrlSqkg0a„9|FyLu$1N璜Ԓ><5Ր&YV'.8>SVާKKɃst9)UbGZs궓 Pql$GP"خ7!*L  L`o{6h2d6hJ0.d Ȩ$ zXLmRE T֡nSآ1/%UBOc{oͼy?j,/?7O]nΕg)O}vrMbV&&9.k$t쵯n(fHNG#TZ6rf>b$xcKZ(:)= MtMmŪ£s&m vPM,_??xbVܒ6~f6DaZ*rB2V렭$6RH"7H[4wV١_3QTϋ~} 1N^H0䓤bu !G٥& FOuVkqƺCیCvX:x~V7ַ!]?[v|=*mD[$ qOgJiԟyOuO|㛎y$_t( Z*С'ӑO(}IQ!!狪vK5|琡BCϪbim⻳-[ho!|H뉐=BaΟigcJV{ sK.:Qw0Qcu0iz[ ~C-xmӶJ`st ~},+ ɶ:p:9;wھJQ=5A1w'&XP*!8(uVQ[R-HJ26 eo.oJtc^R>NsrC7l^;|{ss_muob.W+gPA H(aWD :h Z R1ۍu4^4B+kaP _b\ZTQZ%1xb :qB+'Xaj%V] \NFp2+ ms C[G5#g!Aܭ;=^[8&'u5ͫST+lx_tzߑȊd)G- l1(^z(:xySO3^NYziɗ$s3G%J`c=I5BER~2A4<xVVp08~b^>ƑQ@퀥6 ̋R:&V&Yo'Я֛bn?Z #@Qk4O2"q{"J bUa_ \HZBT mCXHImTBV^l>*;WT9(ʊR,$Dm$3?#r$HwW9[V9PedS~'J߲D^?L"6j1jT:o|#EFyR<ˀwnmK+y݌\]}#g7'>dc֊'俟*^de6Ej4DQoz?>;6.f\[^k|A<|lzn@㪯ܟ_A۝^)O៟D%nͰ47>jq1]sy# ~}{\g7ϮY/o/s~n+zbgywskogWROʟeX5/꺗n8X|zyÏ˥ {`ڋY8Y7ٟBPj=}|U+C_›۳kȷwD-o@͛/G Z3v! 
Jaֿ~kO_7},Cg}ws.X8YQ_8+z[}ox|c︺f{Y/swwVʁgg뉢+{ m]w_MV ~W ;+g.7+; ~)jxW{rE"˪ӿh-+,1?( g y i8nnwu#3ejqsUW~>/VC'i>|y*v%?=}M7dGS?v\>Dsؑ]8`vw~έ3<&}yoF}1tփ]݂mBkt7U֭Tt?Ƚӛ&7I@3%0kɻ C;Eng~ECt@Fž ]C[fFz|iUy]{pdPqay=%Jϗ%|Fc5[]}呜b[6̨vM/IUpMmb0fuL7 >׆{cGٸbO#l Q7}t3V[^-^uTp] ^5H==š,?rlr.+K}Q;œIڦڞ"PV7XGMN.PCxXF]+*˩3:u'}OxRrבrW:Mp]?-WJ#Z2=e }>Vh ko\7B+s9FUXCUXͶijRtm)uBjMP+c95Aʫk1!0Ď"2TGFc8|}GC[}{`8pЊ¾\Bך*ڈUOɷ#Ld9R]JJ?;v:,ӃvuMP)a`\k6`CW;=_S&,oaObyt=mwmWwV#v~[ߺg "e?Xm(5:tM]^yM}Z5ۃۨЄJ) ց0:ԔB Vkm[gjTCyP8_Y/<u|n>M5 7 o iEsq:}M."RTl0M Ӝ7{w̮W;:z7)|Wc+*ef*̑g6OpޞD-H`hqhir4SZ]4=AM+v.-EWD!{]1*Qu+F9b\i]WLJt5E]YwH銁]1n]QGWL+D+(gpo1p񀢫.` ]J9j/EWLkubJ狮&`5Nj1b\9C~)7茏Ʈ889)brQFe^ԎMNyJvd0 quDkGWi&++Uto/֠ ] J7)"Zm\bJ EWԕ:FV0?=8/6?=}A5QNTۄ(i5m5mҕ51b\hҹ)M*+k ۳E f]s^+-L×EwBorǬ&ˋכtgY{jXoWYu(k:G>_Ҿ?u%2> | ߻'LNy>2j5`ġE;M8ןF_"z˯ҧ2 _/vof7ϮՎ뮽ڛg%j}͛o56BLU7T뺅.>9=Ҝ{4oyڦ tU6]>j7nF>J#q߲zkTƘ:*PqzZԲncti'%=l^Pa jl;[ݴ߇d+ *fܱg%bRbO0v&Ut3PΈ z170VC70rcjQYRtE68ף]]1*u0(-iG-FWA}bJg&hf0*/FWkLaZgrS2J;zItlU.<ԞFFJ(Qe`^'J]F(HW PwɷItbʢIJG>hQ~Q3?ݺ+ἝiZ;U2rR4ʹcmÔFhz6[銁F1I0:erS6߬Yt^9-)b`q+rS]MQWS,IW썜1;E1bZ}2Ȕ!]MPWG tEG1瀺b\D)bZsS)yw)]1\F1CDT]WLiTuwFrƮ+1CLkh&}X  btŸi˔Q]]ޜ@t`Ǐ=vFJ# 'Qj,2 2EW61$]iV]1uŔ]MPW:l~=77Xho g M(FӌMmkY.Q5 BR+uI> &jƒ+g$$Z̎/A# r48̺OD+SRWX ?)ҲtR`)K)iƺҜr ,%|( J 8b32RRl!)M()Sl QX øVKz2آ kbtŸ6JѢuŔ*HJ X[9:16f?(*ChL𒦇p4rf1.bZҫ+ܱ̠Zgpy4ڱVQ]aվMgݙbhp*)bZsSVYto}iIע)0TK͠r@K R4ʹiX4=AMK* FW]uŔ:]MPW z/HW~G4 ׉I6d3eEWԕu3B< *'FWk]1uŔ$SԕN +btŸ&J?vŔ^]MPWw]0Z98)"ZkTbJEWU$]r 2[2)*+ZP;㢘i] SUtbtewlz{b$[#* W<ԞFq]Q̦]٢}^`+vJj+EWL JC6-7znԉe7%6?Q4‰4z'ED{O4SaPhDgP+E+'L⣢gѕQy_30LIk9X &ɻ&Be0tmʵ)(RB+2IljST)|RT)\ᢘAu٧LtI'b;c77 _WČ2m (*70+ob+ڈ1{)},e ]1p+ f1jҔY|SUD!z+5FS+%|9r;6;.+ HƑ$0VtFi3t劮mze7tEaiiNJ(}u!|t)G507tjeh.J8oЃ M}IWWR4ʹcMthz6N+o銀btŸ(&dZ}̔]MPWK%%F/В;i>)C(,+vq}+1w].$EWѕy7a|8x1bh^ bJc&+RtEZbܱg?vŔ;]MGW!Qqv)7t]MGW1X ]A9 Z1cWL;V=$J4EW/GW~Ǧ'bדJ84ghH(Un||վMOIb`q++ԡj@u6- z$sw[g' Mv Fӌ;v4ڱf'Q7+~M#e ]͒pRtEf# 8St5A]YPU~Z/olĞc.ȧH6̸rmf^$ 2TOJbjb.ԱOivRyGd}77@N͸J̈ {͔XnL1vJA0tE˹LL!ڨוS+ ]0Q.]1^WL,)*h銁+7{WF] 1[vݪ[_ ؇Eă>Ȣ";=Eʴv=#Z%MTխsϭR h#=] Fη>GWH0] ~1 jut5PjQW Bw=uOmzкAP#zZx]=^]0YZ ] O} (BWϐ4AWd 2JO؟0ThbiK=Q wEӀK-Z폝J6Bϐ 7z8W國ܧt= -=] 7.)t|G5;格Z ] ,PjY{te-EeDWjebj)th7]`طWٞд"mWW^]ds9koq<5"kp罏2/I:S`unw/6%mS>kQ.0whEa AQFD@#6ȏ$7綽8KcU!Gd3<%cNօ9 ~{T_ا\?oNB1 yrgk*uo-TC*)*F[u΁zw:Ü|X"َMѵHIZ%9įZk|0 46G;'Xr!xQkU {KFC66Wn.WP< Yb1Y\S>Em5R)0f;DrNU.Y{u=5B }m.5SGގ`ƤUȗBi`ES }FnQxW 5h NAkah:vδi樌5 ('έZB2&Xo,B&ȈG#A]O969(P"Q)_C5nV!sN=n;H Hј ji,a3m>%T MIJ Xut赈 )-f)f%'lmՎ6< cs^y>.W,|ֻӴWv\7*dM:A[.n[m#FOʔ=֡8m˿NdǥyZm5=εIP'BCkBm1Fv('g = A4̈=X)5n(!/[tؚb樇 tyB1C[uŹEͬYf1MA;hF L)cY-HOO(hRC֢7b6vł*'~~+FqĵQvbMC57Y1wH._ê Q NyzE5FHùE5X{aw:H9-й ᧀG:ʺT8FՂɨ19MC)YRg -jYTAljP%nIXCLPe3BBvig!yנB]ǒ?j'l5^ A8ڢ* g`*a2¦f@ 2q^HΟfhJ7a#d*iSLEi8 8؆kCW$kЭwA< \T*N6fTS.\;KC1ˡ옕q3PdpIԅ̝q$@Br޶Q'qv BPq ΃rt)en`j?_oz7f Y9hZ`5*AU´iw}@qng3kRFgƵ.9Qͣg껃_`jGz vM)o/ANVכK:;;ifjߤ3Ok;ߞn?8d|c>~?7W[|KD3el+ܼ͏{.OW9Y*|juswE m%Ӈ]Kgh㮭n׶@ߪ\濑?t_ઈKrpXεM\h29:){ $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@:@8\Kq ! 4N*%N2d8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@ Z˹'Ѐf1N u@@yï$NgrE@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $Nr$'{''KGJ<'sa]{H@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8˭Q[qchn{}zC/ݥzw*Ǜ@*"-ƸRK-c7. 
Nq9>H&r\]Eb%CWombv(=>P=-AOLWiah}|zJqU|]E=iMDW5/\K֚c9gHW3ʍ3X|1{ݲ⾅WKz QĚeVorZћg5aÊԟXGbO~|l$`bhz]hCyl]hvIE0;r,Z돝J!]q 5R hNWu*rCv{zznWW] ^lƦs=/QM7;6T>'7Wrsu+??~_;{OƱ_!q>lݼg x!)0<8VC#rh6 "{kjG钎4FE˘"(H98rՠ`<_UL=UXPE_)V_|E :Rj3fȴ1KL0&_@D d:t!)6Sl6AϥL3͇~& RV]e,:uv?CX ̊Ga&z\pcgmI gZr9AbXqѩv>aa6ȼwٟB1|&.tSK`Y vF:j ΈABʕewwuX/03NScLMuQoFє4R5T㔔A| nwDDL3mBmǒZxTsO3cє+c"$BJhM^P*v ۮZ:0K6Ԅ/{P;6yK˼`iS7#h]}Crw0~>yy<S,5k|T0~Eo iqF޺ 9(a )<`ܹ:0G˟6?/-l'աK{numZ5E]AܛzBӳg]>߆Ұx.5;3]rl"V\dA/X1?Grbfy=/n>ϫ)=AyN_ťʺUpv~߾S7,jE5D~b4-&0&_+r't5R^S\?^0WCP;6 dqq43Cat`z s`(XfXV8d>[X_}4#XqMO[ADIQ;Jfb 0Vh$RAD׭M.J o4Ş`(Y}`L\fbi|M2dzUc7XJ- &h;vlXXk7Lz[WO/d7a;Ps2R9v#e *Ն:9Ԝ܆pWm3ތqYB&-`&fwv 8U[k$6݇0Kecjzh׬C zϟzgea:G@}Á>@AϬ`M#λW~WznIoW6èhTQMkY_)Gh 9%DaO%T0`"{&CD$ DÁ9)<,sKG2l\a,[0g Y=ͮwSM Op~mV۟ތQrWVly]0 L~f#s-up1!Vje:\sB?fh4R sJ9lgӍ'=,y6ɣ7z̄#b@gE 43<(~ugӉIqD2%ώG%<AJs븠+ ߱Yf3HpyKlAR;_:LE϶?P2 ')E][v:ħ=K]U ;bbVtpTP1e3a5Ib(Y(8-B{e Ȱy^cSi bK=@n^Y_}]%G՘.@>͊I Ƒ;1:f03ƅpn,K=o1"_o>BB[hVkwO{-e7-hXM7$H'IOс)!H;i~ q|(N*z7ˋ"jjn<^iZWRPby1"aCaUE~*R$ /3f50m`JRzk& .'ߘ"kvS/I ]W~{i LEa2VPz@ j0vA8Kx}'W|Q>JUAܬ\—. >406sc<% t mw) ~s8Ou;Gןa^ŏ_n4Cn$OzѻUo v\e ; ^fS`LDd fa|4^)ih7ϴ?/$^0Ή,p |߯C S{-*K6_ .i}4Ew`z//)ĸR;x?V7 v3[$Uw @т^.DOo- (RW!R=I!zTg2!J:a!_>d>m^[n)܌FyMO/t0`[C~>l?' o2DG_ݖ 908@۸#kwG~ҚƓ]nvVRpYg 6LPi `ǷO$$*5yScēi[o7!х3Y_\mgzj+S3ɂ`1x. ('PsĭҰH,"OST3N*K<>;;yw:= Qv)A7 x{&aDZCR N-z&H(up+92(lfү3omgE-YǏRRomu:rj{9R*X¹8ӕ\rfL2B0 fŵ6fL9@OR)aVٹ`1 f>ҧ\0#U0c;J=q9B c^z OHUX '9cy$%A .yH?T0>-{}S>`g 0r4)e'YÌfZɂRi H"A^Su6`x(2z%Sbf3CNY.۸S-glWuntCmR" `2}̴D6HAmc8K+qHO0pivtվ^o^F$ cڕx% "h\1ItJ[%&FDNO$tBI  ( e5BIBҚ2#$ ;ӱ:$CO1a[+L_0oKl}+WnTIn>d-Snu_(KRW/Rٴ4H%Yrԁ#CUl-Hvo[a_%xz'B_4yy1uryIT3ibxxR&tRG? D#794ʠXrRm$ VHҔ$%7kbFC,CX0/SD=Uu]3q{q֘xZe ^}}ב%#ˁ,+"K cISWŕR⪴Z.J fuzuq9$wsblG9֙kS`@nyq~R(9!tEe1NwQ1'L{a z=u\rޛZ^wKyώEY>=ϴcyy-"48{(e0 ;(.d&"Ă6 ҥ<ǐP8Cvsn۾M)}Lrm8Ypͮ|*MSw6frw%C~n5??lx}^u]iW-ft-n''gW'Nj7#E%$&ΈM4WZ "yٹTRba ŘzlY=!(JMu4#2-16ycybk7ӎ]m' kHfPBQAP&ޔ}D#c*kKeu'#! 0)у,L &8Vڱ??;=m@WOe>'527#2վNjg~ԨM{f~*1anaqѠإA(L]t(K(N{FUhM^Er.JcG!麍$Eȇ .L)()[m۽W$֥x4g#ElAi<L5%DNV8_Ž=җv~tqaty a͊I6,'M4 붆c0]<7ĀW !ucD/ZJ nkj`cĸbM폍((kTqmhLӵHҸ#P9EGԵ8K< gڬRMnZ RыS+S'Xt"ֹ'Kt>߄MZK&(PFTRˢy KJ"gC$TX;9\Ow8PY9gtP98 u2{6lm&pk-y^vx>OQ>F$с!au^(:!fB~s6{L,?-["(JG]s MdcW80*CVVi2KD*/D&%QD"weFULcSQnG=/qU}櫮.}jkf'5gC~o-S l{ ^,a2;o}uļn,\Ug7;g5E8ȁ@ %Bw:O>mz7ãnnrhԡ{E.cHذ"֏\YUq(l*yCuDjfNjU{MQclUWˮ}j/u,T?Ux1BZnx:=Gl$b'/?~T_^xeqr@"P Ӈ^qr 2tӻ^?K-oy`ܿLz;n<+ZJH!k됍FK @Li>m !s*!y(E)aB&:Fؚ澙8k SIHK r`#@ˮe2H@QXg}(E󼓒)DR"erW+HS3͸s}ҫ಴XbЃ"fU.|c3E ˮ vK⯙-c{(=3%'1GB9C{ƻ8;ɛkjzhVԡ %]zo5ә貄A+T>2p"6 *m^BSd-̀&&oIf6]4PtB !DtISS/lw HcƔ gEF nl;==$K{RB yE"Y%:F!t )Wq6F*|N\qDdmmYШj#QI _fSMv11ƻcw~q~{| TZ(S -1N4(["8ڗԦ@cj{#נK-)m|SL!(^zWKCj5v>*%؉;Cިom 0bg̻eiѕt;aw4i̗mwJ2}_E:\oYrX,|4p@nX Tagt%7]2xBB(.dEm!L.Օ7 jƴՄhxa ꯜ/ɓp6cHW*-}ڪ8nprg ]:R=vТ{L1NPXG.h;]oGWH.rNno`F& _!ArD 5,QfW5LXz@=Gz2pd0$LzRDcB<{d<)bF(dZGܪ,MQٔYQ0pJRMp-3!kb"cIn;5r#҉I}䑠h?5҅qF!ژ|װa1\Mxeo$Ϲ$RpE.I,msC樅cIE|J j+rl% u/ }%j5p~?o95+2pnV=oZo$kfHZ*NoU]~lǂq-reN $˞E4B xøVDiEaTc8fKuYmNtkd(O! @M!0rY襾=dg}-7m? 
"AmniNdKx %^x5tawq T^lB԰!$l2{{n8?.y~PB0&iQ[D8#ɚ0Y6yPc=ϛfzބռLuqs]kYppt8~33^pgB?Ӄ \\8 ^ 8ɸzW Zwi=r^"|:,&a2*cZ\Sz{N Io0|s}NoٙLq.4c_M>iU9G˿^-pN2֩܎^1&x8ZN_Yc57WW8`]x# ĴMK"b8|w5+f7/9;@uǥ~w)ws b2*zV#*(hk4_)cdRV}vv=':f3m폤hB€;V kic)os)[r[xħxj^;^"uGV!)I <*Ej*Y#^j ]oD]Рj$p0 {k|m1v~]v5Zn'0 gN dHNT֩v$.dص8&om{myLqbKXi3=qzM3G𹵄oKc`·?C o?|?]$1k9D.Y$v}'u6ӕptpϹNYAJM jc.B( y`9W> <>_4k%,2v{UX@KXL JO#Z#w\OYC={ip--Qu{3(JtRf1(Th$xgJ G!3s&Ko^a`JAm޸$e@+Rq YOZQ+eJkvڡ(|N0;œ PљҬx[!vUU__~W_.z.ZOTDptR:AB,^@:T@Yz%5`Ė$s)1QkC"gYGA' ˀNOo \idFy#ڕ3d%YCȩTM-ALdEK&21b.3'dr9 嬖ߺ$f81[{TNxYj"NzoN@F0UBMy"XN##IdNZC6jEMJQ74C$ \c" ӆ`7Wrd$q Rjb%VF4PܗZ4^{Xy0'cH@圑LO6{+VG.Z#.=md]px|7l6hbV0^\1XŸkˁLX- ֢Gh;эwEZc#Ovo5MP 64:"KA)`R9d)&4pIy,#9ўxlئYr59V:ZN%:A` 8 TM/ 7p#%rR>O,%7%혶x"Z٥C@{D)y>YE6 梌A ݞۘzwˇV"Y$X&v}})nU=iPo4,h\9 K1Ҙjt64mr鳷]~yo~O>wB}+ÊWGܵWjQ<:Z.jfHsָja#AZBIɮކ[R_7tC]:~{1xz@w{AXu?7٪0it3l@cL1R_։/(YvO]VN!׸/%YP :D%>Y%&VNwp(8`^ሴyUj4֗-jT_$Cʣӛ~!l cQI^Y^d*]4\(<1DDFfAaI 98w t.l ԔuS71HYl&l"Xs.dὥdnRDk~q[ u(\GsI{R훳[XLҩ֡ϭu5﷔*5TW6AL0WmW\ YNO9 L-H d9+cNϜ2ZD rNz4kÐn]k7mr9|űMNhҰTp%dp ٲЧKL$F>ȼ>e| mISB(wGD$4XN -EvxW[4vytIR d'# d4U a䬣L'EN58No3ؤ/PzcS7qB]0ku71$сsNRez<3'͈DN#AnQy\ՌQ?CdlReL.!PQ~G$ L" m/ =ILeC26K,(eY= mOgP|wdYӺ;=K:Vz?bz?W܁Yp…W,TYU tOEOԢOe\MNގڳD{gh={|%R tsϲbۏӋD/\HL|KM0^7(KN(rG. w. {B );DYm)zt+ZĔRP&Y#R ӭ2&(I$$u()$!b9(yqOsKӳ1\V8IT煺p64-'ǟim3ƻJbJ|[ٹib4:(v #h0)a4Q"P3vK\(Ҡ׳ _@Z(v%c2T v*&%YSF>d EeH0sCLĢ'7[W<<)MID9ABԵ*0\DQ(rDBE l7aT&+k5)$;SlM2{ـ#Q䭽qOLPfyg;{ꢔɱF@Y$ 1VEjTXc&oιPWPx*H["4t9X=90XֈwRZb*ƥa u;Ev"J< NIE'^ Z$FH1TIzDfb.i@LSk_f|1ٯeA7~o*OX4*fތ~ȏOy4Գ7Alw8py|eߏf:sy& 53>x ;5 #:8yB O4\L/O 5c#)Hm%Xs q[ZJ~jS̷ ҞѾ_E7on")ɚdtu7[Cy,nZy#>5mߏnZ!ΎhUb9/rrhlnJpd4her[>ܓ<ԒMͨͰmIOSd|rxӛ6O/9'VMnjuӽ: psϋ1R}>9җea7 E1Mwzv[oS&7l7Bz_ Kڅ$<$>s]zR[)Gē|$[Y*H2 ;.I4f^@49Ee#]*QGd}P҃""Ҁ!C"#Nr$ڋBy*3Ё;:ÚΖk:* YFJnmliN۴jHymy</ ޕ|5ELJjvoD!G Ĝ(H>xvΤ(T$JDNNJ)d0 b :8'#YeN%YG aLpEڻ\r.hS(t<@:3dȀiTL|d4 N&m O+@X㋇y'%#SNj,,=t-MHҤbC]=/<3#: 4jg%!W*7ޮ<6h1bsq~vR:QZj!)3)ͳDnWÍ_g/::8] gތ\֒.;0yOjK_v3],<'G]-*)dDٳHLa ISwYܚ%uS2l(nx$kFWGy/n\\գ~Hʫ#/9¿%4T31bRpdR oJ]2V]3袜pI9R!DR"$Kt Xwg<2}/d&ֽ=`aQYĸ5Zxi;5dw)(Cr eJV҉)1 1RM(,Xbe)e6[me+c3sy[0ii>ou_Bݝ-CݫRuo+Ϝu^7,p1~6/}MomW+rkؚ·ST~x$3s+& !QHBՅ0ϝ]S.HYmM lI < T@*MJ>'W zL Tؙ9W ;b abҢzo7iv-pzu? ?9\ #vN0ĜQx]M䳵ɪӊgZb(Z'-bfu|RBP*l`0ȆP̓SQHɔu؝0b8}AzǶm:Fm=`-ʑPTT0I|4с-Y+ <@FYdh@npEcpZ"{5+ً-cM V>d RIDv]W̜ukq-80> "v?BDFDomIjQq5qg*6 ;sQ)Ɯ ^m XbAR1VBJřBtM6sȞ1*Į3z;3G_u`\6էXggd[\47:\?J-;=cU t`Hrۍ(;ztQYsBʾ0&@"'_|ʦSr$-01%$DIJւ5($$ȚMA_U3s&GϽRƭ9~^O@$ۘ|a 3g~C❓O_Lէ?! $hUVpG},kDtOّY Y,o'w޾f~7Ӱa)`R-{You@Uڿ1g3_?[6NQS1}2H+E%&@MQ&tWw78:߆ƎZΝ>$ljr9Y1N ѓ(a:*:2+:(l^zH&|]ƐGQH!; ΫD݊ӂRh|Nh+UڰQU%TIιXT(ku83s2B7OA}`f2ܓ0mjnKَIG^{JI=J};7lb_R߫ ^D5÷9׽Cv[C:u&B*Nd rQ)8%FsLєʊ1)S?<ioM1|`Z!It B[wiFg AIRw.)dI@V' lSGH01Y>̠QGRlk ݆P /`X(> gw%s1\hrg^^2,(s1$KXb`I1`J$r2~=2/OaAH&jq=E~pYw1{6Uw6 :pkVZ2 ́_%XTIUgEgBo6l S VIJ:A ҶjwV6eT&gHBD6$U^S0\OQMZҘM%bfbH5_yuz4ggϫojշ[] , 'u;qd?(H^5q5īy{?xey7` *}h/CV3x6CFH<GxARQN"_Mg/Ź $,~$Fp60_9}_9FLn9 {?2JmY6xvqyjj(=*ryH:n\MmB^:qM~év洸z0Eofgl1Ls G|ll<5`4t5296#iymȭðuufyBi4?*>_/fpvOZVCxˡ!b)o%S+O㦋ܫ/tG>RP#P[l/ndfkT|l6;ʥ}t@nc::(R1KL>P{g{%QPNRu0k4)Sq73wk߳-?xa=>U9ΏYwݹcͤW;CnêCԚyvxU$E):tuMIE![H'7uavH,i*$""_x3SI(-ZAc $S9Z$`]J{TJ,ZZ`pU)BH:B@1xUk+ &z ]<F@d1BX}('#wo H3r+ =M)P-$VUle%ΆTNrLHlRz>r=/: #ؠd}BlV.iK|Ym7jU FZ_9 g]fl_dw܍PqwgWr̡1Cu`7SguB~+u+|rۡSg^ !$6#,א< F" xylXmIwyiU "IvK.y/!w\^A^w|Qpv6"`~Kn;x\h)HX^c"UjaQ PT$%J1[ImF[mer,U˷ nV4:iP5@g4 ?/GoCKsd۫_FGϣ-P}S.ɇ7WzYM[m9+wh['f*ubnCِ۰߃҃:վV9>ׅ<_yXCiw?`(E@?u c(/LG: & L%:asOv̮xG'QSLXBC!(S |VW| owDȚ"AŜBZ"rFW@ ԒNuQ5#灡;UބV( ?KNRu"x=BDX`A(m! 
y=9;=x0VsQ2V71V䬔37,{ݰ1s<,8H!i%B?;ݭSm5x@H1ٟU_hg[ѽ{["%۹*, l;|($&z=Yz8?-X3+uLCKGe%\t3lcs7Px%C"JR!޺$DC\nɬڬKW_ipzhd[M;BF+6#:jH3֤5c \Y8AE @QQ<ʥF:` 2{猉+pY ͔:&|%+|)I2YPQfXPP(":(S# QHq6Ta JEY24E5՞5CZL儏 ܸ`Yn 6JҎF0&`=ie8 D  fMV5,) DR16)A&(@Ef&\.DmdVPfRtQYe C󼬗Bs/m/]1& {2ϊd7 Dd|k@Y1vC54VݙѸFE3'Ї&Ѵ԰'iiIY\?R.O+hVee鐁ۼUX&e`wQ(CgQ$G@O;R@؁jDź;AqtBs`guc.9tـ`8~2ufja_ȼۏ5uwi,܀\7m玙TB"?!{1b+Qt #!y>:D kWѢjdh={A;v[3Bۆ+m_V$.@NW-فѽC{-NVBTiܹWEqcPm%Vn0F*U8#7߼nvm.S!|NE)>~cf7?_N$=^*~\`O6V s㖏a P+_ː cXNb1.<ZHJ'-@R V01]!'u)ۄFN*!i: kxٓc{u{QHWj>cQqדuI۹a>?;߹T"ҥoIN|X8_2 h_nLTYٳyt]^|۰(>9XFkt-{TܹOmDm75l̂Tq<=/"Ķڼ^rp&eV4/{p_ǓIn2)9Yf`DZ-n6/7bȟu Lm[bf'N5ȱUފ^8\}TtO8(}7p>t41B{0ȉ]Ibjxj_TB$3MAee63o0pV,Ȕ)vt-vF luwE1 1ZsLt*}i ! UɞPu /212fS5t 9q1rЮ0P+ 2B(A EG/ul r4պǓf]ӶHtchKiˆ/өAv5~9.ԗ0i8!hgZijVN;:ԯS w޻,ǿE%.2NurV{hXGCOaj-:DzW]t],P>N}d_չJ0xhD JMon@QE`PZǵ5 Z֑Ȼ(uށ(U.?]k%%v<2B=-IEFG &q ~fow*MCDI)a$clnY*sꔠR28;Yþ6/u{_Yj@t z9yb ,yS]]j<Ѻ\J?GRk#0Āךw A+B ЅLFPxA0j"kYh-1!MrQ=sa8mwYw/ 'aIn:.d`=.ypyȉ2Ek>Ly1 {xt@aIYH\74Ov4zO)x_GKDasE]w@ݱ;:j=mOWţA9x_h#5ñ=IpQ{!GG#^F1:ѡdw+1Q):#IC6GhRDt4׺` E9S݊)C{{uXgt}q~#gǙA#*c#mW߯FoR {jؓBFa 4O?Mi#qh䭌y=tMԋd^$+"Yu+NK3K`I9:DRM$PҒ Ay4ͦ ?IT*j6Mzf\59k,@pAxNJ+8WF b% 3L𔂭NF՟'h"N>O:nBUW#'&C\ Fi*S,8! G6D*%T"JdE ChRV[+}q]7uh"EJ{c`rMYb0#YH ( CbY:$DVʀ2cThC0/mv ֺ7<` ʱGt(CLҪEjr?_&.x]J-D˧Gnom"+~A%cr7p~?E{Dȫw;;u|=]f _ﮯu{K.GW2F.yON\Bi;Ќ ӂ(M1ChM>٤sxufzyqv.(sҖ0m.Kyr.ZPAϤWװpҤMu!)f ׼SCEzN5Q^%pDDTi7g^\0Xܐ8ڍDZq~;)TCΊ[9VVih{\1ئCaGxߔwt֞xCas+0R"FХ2>QqIWggEq$SA9_LVC(Mp !ؠJ7Yv*=x?e2A\d$hjG.}aqFxK;,")\2m$j(CWڠEuRM<3 Ǡ̢etG&LiI@.qƠ }9g 0bXρ]P K[8DaNh%W{lB:IWgr/zOLn~C̷RW܏P VN_i,?7!6 RuόE`<øo C!7Q~wsIzQX3I8;Z} Ox6aؠ Eti4\m5Pj\ඩ֠lfg0uOn4Na!}}~7[s6+ܟm-_2ET=H`10>MԅQի_[a,mcl%NaLuuKT6(o\YBFYFFقT?w3cxZ8<9eDqŞGqQ;|.[:Siֆ`~j,|^Qsņ5y)5^{y A@Q*OnUE(;^?a<2ն-X~<oo0ha[;jV $Ovͧ#!$~bͷ/%d3\t_O׮/}:ern||ÉO-8NBCSՅވOF-F wU8 {6scMl51\j%{7ףktzS?}/iEhk {quہW7ࠛbjC9\]v?zMؾO_P 7itcOeu;̧f& Ua{(5(6^sqHgN:3{cmALr/6Lr]owxK0 sP5|L&A"GPlX㐅f{8H05\٧)rȆ5Y/iӶ.˺V@w;[j uK}Qo{ȳ霟rȻT sf=|)l8l53WVÇ-tB'a#6 fRY@ک證ce*:;.Sh, O)WΉҁR1-J%4)%"Rh#05C9Iwmm$z 6n#p`A΁K6BFf^S=9{83$e0+qSUuݺw`TYy 1 XC[DめՏ(O w${Zhx=WThdSH:tWTFľyU.t= Vo7)RVx95b/2_ËWeY/R|6c~O/:^߯IYq1jz]n7; "{"k O#Y=nmDB+ ɑYI X"ixQC~JuE h~63#ΕnK^ W9:5b!䮾cPv g߱X*]RN5[X9JlfJN}J^֭ۙ{;r95qX&>E֥ :V~&W6< _„q&,D$2(D$|tjZ; *03R F*B)>`X9l$퐛eeCMknNH,c!v-GhZ]iWGԿ^cs$vW 3}(ƥCz/^\lQnB/~gZE NwhAl|梶?l\|Ik쳛 ~/^{nr8b\z=n69×mԏ9P8Ֆ7 gT6?%xR 'g և)f{/W`tIwPT0)ޮx`|K8\*FӞ?E3M;Q߃:)1-СJW}z!l3Ѿ\iƼʴ$)n?8Z?8H]Vpnӫd^_"YWX j1yeǘ d]K>J9#tGZ2W}`^ѵUc>g!%s,A K ïz{Ag,MG/aPh>#w?r^M L.2/.x=oP%uFD")-RÃ/(LP3*MaKCKlvS ;}ˀ4b%]gӢ|`)n-/tDHB-Un@,ES.)f4{Up=` {2} Å:01U[ap 01q^ R-?wUIzrޛ0P,bx%'."t _ݭBv[#*u>hPLs[oy& 9Ҫw{T6<%F3OCs]j% ]gJV6 rV6[tA% J2U BYGc(, Q9 _^B28W@ǵV# QGxU}) }ZCg ur'V{iW#ɳ(A/ÄrL.2"..\` cF(JsI++T0qB_HAV+`*{Z-}vLmf#։Wq^0ր^Hq$cRHD0"aM |K۹F|}4jP[X`L~j_㳹j#x6<^>"Lcz%ŧ wqv]0D!k8:1=SD\ƒ>#j"Oxapv(]QBwT63:$ MAgl&lv>c9j=tGMRzD?w}u#0(3XgAPvPEj>H䨋;yhRM/\+NDJ:#!u /. C5rMGN}DZ˙3t)g S(>~߄O%}>` |JϞG*}7^ %#̹r,-\5sڳ1=qђ &OYhc)l` S3EAi#.- ׅ{)-R(g#A m܎&2H5uʴRZNrK)#TTxM#ACS(Cc!jǮoầ};Ԇ`O[g $0ꈢLsMHlhyO )\kʝ.2Il#Q/zD{#2҆ߠ!1S*).QoE)C v;)AP }Z9EX/2PqqxPjƢQ0T}[F`FrChL9ApBWD3? 
-4c81h:R#T'߶CZ3-lEF=EL:؂r(A0]"z@ZCA= Zf(GTFL.vy{VeM=H+Y5Z\?=UU G_)Aww㼢#Aҷo>%LHguDo_]PrdXml#ǟ__=kq_N&UwHk$F$ۃW?rG\ )Pb0_&0Q 4Lm'S;E 2"5?-S*U7\EޢN$Fԕ(叹ƶfL 5:=vcĩ\ՠ z7jgV?vL:rbL0֛"xqM -`n /pԨoE2tp:}V 'SD_B`"$:(HOK;#"XPԳhJka<6ȼÉfI q0#d)l11س+}T3Pi7%1xSz7MsL-b@4d{S9BGbU _ 1 ts8=7ɦ+ucJIzӛLC.%JV~9H4 b0iS 5SRV$D]ڀŔ۞^I@`Pzm޴ʹdT5ӳ 0tz/h^R4f[`za80RV"HR'^0ȃ&M/ l aJ JIL&eO`=BDVt#*2(r'p)G' EEA7 ֺQb[3& Ʊk?XIcR;&.忯.*wQ^SXN?E;Y|& ` PI(RGF| s[16#^iyD4kz9HQdBIE}σn=yW?n-Qgj<_̾ K7[S/' G,].nWݟQ5i0~1H8x]`\:ύ2ox6Ic#|%{UG%s3KP9(+i3:V@ Vh){/dz>o1 3c񎳆\5}@zI(W68g5ҕam8W>gp ';'{d2]<yY㮏`Gyg to' XBzfhztkz)fVT-«y"1A~#C{8ŸZIIy6gTP7\ y|雘veGvP"[sghh Α}^"YW(h ʼn{Eא#XUmp6@Svj?ffhaqo L65cIoI?;<E01B zqJCRQ7̸*U.';)en{6_4)Q($8N%4 |9sI9o} R$dCKpn>̟M[{@L{Mxt9M4GzZ%m\1w&X}xn&z;[n\j?MV~j0cm >u߱НeaI"bmw=daZdE0]=EYY?6#]ubdLxvrܐ|"zL!Ywg8R㜈c/tBD|Z.¼Zq4_tn^}ڜn]7zG.v7@sy^?}^leviہ9eFw6^ߒE[fnG| Bm|軉[lr5Dvn;ݦh,j}x'.o DMG+8*ۆt~^.lmf1m͗5@vSQ=v @IXH)dyA-M[ǒJ2DB'[,M:(}ۘ`w0 <o(Avs>@.沞,@F '(2yc#$2:A5)*|P30ʊp#<]GG8Qn3e@cB)1HEw Pa@F45K!%q[&߾f<-tO?5\l^Rᵽp(rAAjGEǡb*&] z $ϝ7@u%DZ &BKRhR)X`ւ`DH* C]TYpH9Z)fiHQXK)$@0'"k [/hP*x/)[O'R7|͆`|7Wm}=Ԏ̌?Yfot'Y[ҷ/7C#F/2p&7cIOO2*<'ly΁k`2b G!p "kw9eꍠ 7W>*Cn !nڅ*ܪ@T¤nRKeʈKVaT1Uv_93CbJvșY;j*P n8#noo^Te}lF~''\j|=AsC]/y0%%<АjUG* y82u6fy'K7|域OCex7qܾ16U8gF3r&loT%4mB[C7{l8 MgҚ1!D|l1PB9Uy6BMsIBz%]:1QBq}@DUԮ9[$p7{X(Jj:wɳxLqLܛerX8;z2'knϲ<|`UlȠ/u&ka~B¯K2x  `!0q (UO!DOB"kUe6S63Ta!D4oE`ľ&T0" ISÔV:F&)"/h,ucVVbt6 `) oQt+-e[!pozVtj\Ñ&0Qp~/mD}oZݍn3[MtEȻwwS2P܃23^y)n݈hq]qdb"-G,Y)tyqr ߋb' c" `Ę$-<}h/-yٵxӒq{'0YM'HKWOׇ_Tow9&s ^ BXafȄ8i$ jb@ 7`u06q8 I68pWÑӍY1UaC&}˲d wa92nӉqp$S{u+D~K䧼Dnؤ>) "Ĕ G̈́GY$LqM|+"0 Rl-N(OG)n'RynIGŤi^IS{AEP!i~#TlMHeE$Xa!a:T* f!đu%X\{~d]_DP62ߏhf>y^ DHdfIKuJc  KcV%DfM"Q ^{ԅ _ uM4Cz8weW)R@"$` ݐBaQIgqQo9V _:C@a}D&0 '~-,4G@b'XJppZ;ހ!8Pb(n$ #:>U ^iV1ÜUJw!; hGw !X ^Y MӦ|[PNmd[U&W2r\tݺu ˻D5ŊBc=S;o}:_[W8 dUUTsmԡC\6|y]b<ڹ_3i[J}e{WJga=]j$g1&iX&܎}mHU&2LR'OI L"M6pPdAsv݈gO[$JR|`W^mR& \Ć@3dn+4V$ %OE05$l~ rgA$Ans猠H)1I5VRB(*.TT1Qr-+U 97(k!P&U 2%ge0 Tbngq8/l} bXS [× B)4+M 0)jHir7,6c1ujLpg2$ITR44~AFq ?_x:vh@.Q[v51&3%\I,u3d %nL)GRl0ipWy+C@dR v3EnQM*BZHwBorb sChcs_qi:`G S /?Rp4?EGWgpW|i8^˝6+K 4#`*ɺvp{̙AV $L]_yyN>=j[8}sLfZ;%{}qj4NGY~=:~cn!37>QYC|`b-ƿ.%.MOv3"_#L?zR꿃t8 :{K;=NGf#ڗD|_~3IƋו2>%L薨RYE*"|ZH.;ݠ׬NTB+1BuW]p֋hvV{),⫰R:Q=eqF&*kKu_|I7Z7jq>>MFpğBz{A7uuK}l )m9Jk![9ȗ»7UT?T@Su>4PqbG_RMgO`g::r.Qb5Nx'E 1\:by \vELE삊>WWu?ov{Wvۃ|=8JqS7 ıkŞwJAĴjgq2l!& \ڰ應j+&h/f>-LQ&f$=#,N{trD[=I{_?x_{{9׿5n&5F=,rQ`~ŀysXk}S.< }ԋo"!/1D4F:sug*y-t]?Gﮑڞ*)+́U2Bpˋ*ዹeU[;0{U~gypr"ХXUcX튡x9Ժ"U^uU ח=ϮdC m )czO?D>?gkK\%+\%$A_s?麟׾;t{yupt|HϺ\B;MI7t aoB/ZG 5iܻ6]#QΊZn3e*u6>n myvy֑s7y }0(gem[篬p)x[jxxXV;y7O}sw8kZ׳tc Y=^U^fo.Idq\DHʞQe*iںF"gFY]N泑=ҍͽ}h{c=T+&LΪYgy'oFJS9h=cTT@8kN:'x hNG0=zDЧ I.I7NMkHh \6'([ܡ,CzxY:ymɉ21D0<Ƴ+~%!|#°4'\IOMI, Bm`F"` @Ƣ` xr)CLu28 Cxq-_u`~B`?uQK Ξ]Jot vcAC/!Bw!6l}@U( M S)1ۭfFHvp:<#0kȌbx2b4ю$`yH6\ Y+A0}v=`J 8KŐo9%jiƐ\4\qN_F4Yʨ=9L%cl$򆰪pt `(: #ds10.+!薲D!l`6`z`Wd.rͽ 6:e¤b60HL,Fx,644UF3: 5f.q0FtXpI€{AB=B(ptNv=-D-d|FKW[e69]x? Xf 2VpS_ #}wtҴ"G(oF5O9WweIcK#nlm흇]CS"&enYYͲ敕e_dDeIuc!8殜?|zFޝice'^&*fx{6IDnP<'j*Ԛ]xnF:(R(5emSbN ZI{L$ gnmA{?1VV]WSxÕ]/姏<<ܗ>_GNyveb((PA;d:lgU.e5g5g1fc|s f߁ˋ_է~q9NjG#h6ɤ,(7٘%xrb@Nl~?R(8#5w]HKj9$0U -zK2eS6Y>0wF^]k:~Q9F??Y?XrIS%o>1! Ģ'CBw$.(X#3ͺg D;E. 
hTTTMfU=_ķ%:XmV/~[/wR=jhyU;M;MPT|r6jYzzu^ Y卑tMVW.K+x{ʱF3I 2Rb'~}q/6W?ˋ]Zƭv탰Ƽ`^pBIj%m-iupIq@%A\DF_*2MЉPO|\3Vt()TJi0!Ne1pz5lLz 3tomJ"p/8U,C(EV@4pX5:Ǩp `!&\4'Qx h&( C٩%eVbhݣkp@&0FYΩLH)&N\+ 8/2Sd:J;< CT!&( 180e 1i"w3&H η @`yk#ZBsM >p%Rr 10& K}W=؋ YL1(ψǯ _1)ï}32_1*OCǯ׺Ѹ߻M |w\ͫr*o6U~sx}lS{%kσ8RJC2v@#OmXH%&1 eoa//{1Js{״W{72"z8(S^cGً7dom2 e>}%Hi4O)FZ'|a%bI$G1E~:L^ܳŬxlsŜb.bbӳ9Z^ `CP>Z^ o<6^ ևǦ/-/T-/ϋQ]@+!NhI7$!lA!d"8x";)n=)%bBbo>+/B-/ʋE`ϼXxfGˋUj2͢B, 7 W/O̘g&nhɯl5,Ah~gF0w5#'ybM1%,j?`^&wI=wZ Fz(?UVЍvs~Ⱦ0檨Pw} o^/@S>hg~T1hYtڭ$n_ޜxO`'0^/'0n@_nDZJ1EuJc(^[}w;z.4UD%wӭ[)N5z+5/R~gޭ 9r-)ȡ$C_ޥ% (YJSK07fRf~١_KKKC=H[9Vvi ڹ8k͆>[KRY (Сgk`oH3 =]Z! bC/ ҩ%p)igC7Щ%}OƬy0Ԙƌ:Ƭ0t15.-Jyx5fCy1wi 3Ƭ0|<Ԙ;j0jCSK0R\9oA<Ԙ@\Yq!`15N-A{F*Pcj]Z0*Ƭ@2Ԙs = ýC[K0F^Y 8Ԙƌ*, ZPc>D^YJCy1wj ΃1+}CSKPb;1+Pcj̝Z~}֘5't15.-Asnƌ<Ԙ;՘ |15.-Wc6`15N-A<k̚0!Pc4ƌCRjC;K0ҴsQojo߽;dyGĔFWKym4":#X̚M$`4) { <G&铍DJ3!P!`И3n>!~,z#=ly5IW(`e,,ZՋߖh3$bcy;! Ɨves?J6 c8*G dGL"LAr"HD m uh?<zU\e~UP[봘}|ArjGWFy .ұ'V}v3'78UF)"Ǵs>*oeLDm)yIVQJ J-Џ?߬F_mEumC!ӈh.aDi=0q/B3'BR;i@N3ՄR3A{1;Q.z ([)[ո8?e2kți9Ʈ7SmXɢQskBՊT 6N x"gKo@e o*ԗͬ2=]$½[`H o4'$ 4nuT*i)JҤ5$Ǯe'KcG, uPbd*^2$̓˻]QL<*;!+?P-b&Ip#hxbWZfD!5@1 7`W>vNu.rs\]OE WvuDž~L r:QVȮjtp\CC4m} dr[R@KFHuSj!/H`$ ) ҥ :vҨyuy.<Z[dbfȸ`E/vgjaϱ٨$շ.?m˔߮mm2eƮ޿f381iYE77(˳e3ƛEl>aie?\a&~`\mMt0\q&_WnDT(.W7χQ;TQH|Tͷ@p,S*"ux88LcNyv_}7>1&7K*5N)'#hnB V;"-6K PGb cRFRp>*!CLWCMd ߫ '=I}m kN9B N/ã5ƺd#4r&-yd8Br<ŜQ- G>>4눐J娏 teN፧HB޺9(J({ԫ`dյ oZZ0SaՊյkGWSBo;KNuBšm&R%4dugr\rO=1Y Um+.o߂'CAr`N\U䐡9Ņ:n)7wg#9&AT0ξSRf^{ r]0.iqWa$&ɻ}> |+_C뤵 OAy~c+&֛0 *1w5sgn8ކpp^Da?ޚ`F9D0oH|x 7LLJzGpa؉7 y]q\LbZ$R~`˭J]W?:OB:NgM'v C5f9FZ[N!1_1ƒ/ZU}۷*æ6g _wF#sWǜ+!cQ<~EX&+"=*o}z$hVKt8͂qǎ֍~uiISmype2E&J+"qՇ|l (C6<%`ۺEW A~/BR;Eu)Kд+WOl4|55*2=6,bk m@ʁT+xs`%{Jeשp.Bk~ ]VEղTPJlNj&}h)#S:Tĉ~+6RGacmNĄ_CՊ#q9#"$AcKU1rQ-4گ36낢sBJ\1ƓS#(ڔr6gqn(HgRPz2Pyϱƽv+eJQ(eF&,k-.Fw('3"O{JK͉"&~.vToqVm\,4JS_",Q#|Ǎ$  0"a8p;T \5J뵻KjM~ִTӭV"J!W︍R1ptݺ?RN[`Gњ 8N`pbDd -1a,mzPDsvX4"i]{`}ɧɚ"m enWUT#,Gv_|>&sWE?!)h`DkZ3kA鲳0 iFH5Ɔ袊 6+s:b)GfL(eiQG8k dž_FQo}X)p*_b[7IFd< 'gT)zy#hR+'X`%ʃ C;<&zl,:q@Nv Vv lb](M§8y/MY(+zb=B,djݰ^LGc.јt4|4fv7s"wQ`C ZB9'-w8Fdc(Ji(>%q) r|cOٶf!ce0x013P}ivփol2PaڌsY"ئ}25LM.SSrSiskIGZE *HdN062H쐁:LevL{76mMEKhoto,//O+Oۘ3wVJXh='^9_b}:vJVp[~?3Zo'_K9==R{ܓi(dmx S^Q$lŔX+xٯ[˕Z>ʼntdnPDi}u ,I?O2XuМ݃#nQ D/VR`$17(3!Gs `) &"H6`xviQ:=뱨8yFEODEE,&:2`sq7@n]sD?plUWJ{vjkQ)Q}tATrQ&k TY|_&,ܖp]TuJ.%\M[8V0.C*qG#UAƃV -dJmܳ?ıLMbdG3&В 8ǁԻ~ I,q'v?DuoQI ;&֑%%f| 힔+D euQc<д $΂a(X21"sVROtG0!xE)%0"@4!ɰGp(2n);;a0z PʹlhVn# V IT܁h'U`Hp0m#@W E">gLœ)̙0b$) uRGcVz)sN<[T!~uՈ(L# +- e|v 9+-cJfȍ2UGOTj L26T]NHM7?d;EمYkV EN>IcOfC$U«'1~0~ eыN0BbK@1}效BR(J!aTZ{ -as l'e`M=Bs@DE.gs˴A^PD0Y) -F"JTPSt8mP^L;HEa42H]t=$A<п[LEu+فo2߫xpzrQ`Fm,S$> ;3n-^tz{qG:: q2JE.H1}shw?nH_ޢe {8`Y$OdێglgFή=%,G14j>WWW-sJ|`y+hw:7)0WPX֎B 0+_U^GX 1͕fc[g^~;pf2uoo64МUXOþ?[ )/P/|W&M^]7 !vW}~}{Z3_?44y< gW7[4+.n_CåF;'J30 ƲYcㇷoy"+ <\YCvZ t$bŚ٪c K/ Sa4IlhM(> JжG+Ht]lD A`CR$agК6ˆi5& kX`äi6jŀ 5!?#N5MŠm>,qB^Ǧ} :xkU m;t-.LS,dM\q58BThXÕRC 2Sd1[Y!K VkU CeNU**R5쨗LdF5&(`E >r24$Lg[>\o/SU i6(m²PfWlGPeîPBn˚#) \l |4 "ɤ{ U@11tEy^1U__sy;Y&gD A_sr IuYE *ÈəL6П7Sd`D򌛶wװ4U3mgLDmb76a2e: ah@t A@#HIVQ›XxvuC'Ԯn;< ix 3J?1SH*V5pוR(*'# T"AOʤQ$d\yj?,)zJΎSX`k)_.9\f.$ZٺpmHşҗAy KϪAT1֒rNv/=2)Z"u.fS%FԚxWAMdm@F+[W _`>$p~?jd0w2)M -[$4u/%Zw iҞғl!AQcލy~ݻ1ˇz7ec?A;_EGvoa2FiQr$z~Y?p(npԉ͢>Qn_k=zY Ic('25m]kcGQveHThs$ս{zmzw;9j4oYBmⲷ7MiVmij;nu{/-AШuJ=#r?\_:HA`X>񥢳]3,ZsAߪw׭[8K)/`>Z:˿tWyTiǶj˅D~gxٍqjA=uEj߾v+R&1pv֬St5{y+BeZAxIMzM(9[a: gH)+.8mPO{t|S- d JY? 
2#c_v,?sRLpmTZQy[{hk!!hԕ&J-3*F*~,3iЎzo¹IY,%^cJ3񠊂jS@1[l W]cՇ!$N q;aWVMbHs5"gs{` sf 9v -@K=-H*-!VH,CZ~O\ҐkL|HsyN߆PkW@n3UBg|Ry ՇeUWmV-]YmVB8':>޽3d,QۋvD61z.zw*ejK ZyE jj’M=[m9g#$n#ñ=S ՇO'Zwjv7!B-k`*ZY`O+rScC@ '`9OTԅQz"*Xc0 z (S=$9wT|EHbtZ&iLe2mөJdʒP:ض1]1XZRc& kj_jDOcmyھ2@?ʡ ^j:Jngo/\;|^'A LlC:L~GCi4`qETQLETQlW5x1&ϧLH ZYAZ۪:,05օ| PC'?;@8׊|;62dZ/ilm0C21YH"=hA7@)9]A0`採 ⎡,|A z1]"]&p:Cp-ő/)>v_4~#]#`(@`Ϸ5k|ɸ?&IT=F8C .͢,(QCiJ_ID%{%؈IںT9CI k̬c A(וQ!Y>(YiR8[(-s1ݯiۢ$UóX$زX^fO;,S2z_k:2+  ‚*&&J6œZ+>aߪkEERǬDVdXj)e(kM(όU[juYG_.eZ'0kl}Fi0v7XrT:[Rjڷ$##}0cGVh;&/zA M.,JXVwٽ`!JgnנڐpUgtrAHAukAh'fTS,-kа^L']<S><F[: 7( qn1S Dp,9Ba, 0Hb˔(ŊP\L4&W&10Jz v*z1QM(䧄Ut\7=^ݤH5)ƚaXjP̴>1Lkx$\]ì닛PG(R%!㧸:@S)d&A]<=L t\AV\3U;b/"g 5ͪ]5!]uVML%S;Rŷ]A /zaQ }Vjs{:N3)O-Y}9[_ Wdf1KߜG'kE?z^ ꁉ/fSđK_L q&~F=TV{͑m6eY$#Rޠ=#\ u 6]*ӑݯbYil ָ=˟N>/h|6^ÓiַpZzp+9>ҩE 5>WkmY}np+V~D;T˩ffpY~;E.01eg?ǁ@aWp^ ~~u;miERӖ aX bw3 ˥Kꮩ*f>J@cl vGtFHM˜#m6W LjζVݿGWjK荼"J0ǒU!>@E"8ʇh-7͎dsYtgxlqg >ib>@R4o3!ڰ>6?Gc7Ào߯?'(~^o]?h:l*K$QOUu%~r N00I@| }iW%S:7Q>6&8SW²1b:h˾ [|=]`tC޸FTX3ΝF<DX B\'Mۈ_wnѭ y.Zb̧+Fv ?֗Eޝ]{d*:<8gG Ktq~=7>[ AoW'UeLR\Asւ"Ȼ 1cZxN'ʑ`Dr!6\}?J**aKe"S?٪έ`-$Enͺ]>|O?@|IMq4; &n X=ǘD73I{vMKcl&(Z1҉_˖̓a 't{$lu"45[][IFPK޷)O(R^/A/VH+wv~[_|L>|2bɈ3~ tc jw]_WY #%9q N2DNˌÂ%5xTT"A"!Fll6A^d5 +bv[;6KO6.%=0Vdxؒ5Loc Ou4RoaK,47\3MB5N8o0)QȨ4S|gs>3Y![.R FA6Ms ʰL(f@KF/c.)bO-j]Xz_Nr|:\˧w%v4lC^Hl-on/cؖy%B^Qo`wcf}FmRK@\6IB1C. M_bC8& ù#R, h\-e@ [@X.a z->L`uL m<: k_ݔXy1CtiLXˏdFhl ёMzNTϕ_cLcvd۝j GQ rgʬqWȐRɵ_SɭܚJE1)dvrG׆( Y;hȭG3PVP[O1/pM^|#m KKQnzE@,¡CTD^;DžQaeʑGFQA}J,#2rbBw&\#1 :`|Յq)%Jm ruhwM% d9# #:f9 [?rPr@y$!=ߛϦ>TuJ:e4i4Y,i,-rZ9GvNoU^@QrI!!9w<1M M)'i"reKjPY c2;8p/&5-D{o}fr@ַl"%fXGVuVeyjM|'UZ2?ݣ44FL?3A}3>јW0)JE)k8GZgk|IĶu`.- Y8#\aP2,WHhDS#ReH TEqt7n9ĬX̫ȜHT30}yr0圲LL;a^s!\1aIqa$2(atX~}ܚmH!dˊ֕$'z-W.?㻇W])ASȇ?!%+-lg7$8#᤼ˈrtC szrsr%_\t.W}6x*9ӊ@4)JI<7`{ {$Q;cS.H/\} ^ ާ!HAYեfN1ǟ4wssr497{słY ZZ NP@pCΣIgi:YtOnbtĢĢ8ȆU,~9?35o>;gN41h: TM$ HUS#}ofSFfHK)Tn|nikvY=ծ\`FZlD flOxqv{eNj3@0mtQl\>i1`jp',րr%latG5R-yNUY eƳf2뾧{#z]gȽX* Vp@kV:) FۀGKG-#y?H XY; WAbH?Ŝ]LZqO5\*|mMle"f 'P&Xn\fwm^tv޷20C__j{^0Gd= 4/(o)5Ajk(-F*͙M"/87-_Oin`!(ς<nj)V~q&xL%l)@q^&WVcR'nYALPF]ʑv1%0I)2Sg9:Ks#2vO+c`D _k9gA TY8Mv<8U4@ ӡ0ڿϊZ*h:\&Ym܆am܅-W;7=kE?zVfhFn] a%>x}KT DdF{/y$Ҕ \10sz-GZ\1A>[~bS6oʓ~s[*#R+(2Yax3w`Wpta M IEt-iAT$aXb5 _C m(9=-&PJDP^j/5gR葷+RݴԻMyE4c{ҥ2c%RC~v bri[7+8@ˆv`|x`]4ʧ:ÎԲ01b:h˾SqƌSLH2yFmt*A>eDfB n47mb"Repn! 
Ţ5NIǷ``$7JA'aGo<̷AT@"Bu%Y!x{Ȩ"΄YtoԐ gH=剿K[ S29ٮo:MNsa_VcL11wHG2/L$HE McM#d&Ŗd.>d{:3V]|XfdzsY.7-0)';eE)-E섕\h,f4:g33S˖B"ٞ$Vsٕt#XpFEkIj W&En׹0>F`s)M$w QϡMA2j X * oad}O;`MZ5\\jF)~xUC,[1ՏIOy#e:rPkRtjt+:ܝ[]UrvRnq_Tip@_i脅deOdr|K+%_DziPJ DŠ `Ts6 B+;h*7(濣oG]nG_bJ県ʕR^-3ϧ/d:vsn1vsn՜j7fJ¸ ,^'Zo;_5 Djn$>h=9槒-}[}.E=r u8 VmWa7~tD c*h~8'J7үZ[C8'Q2˨/ fYN, KH2jP2x?!7EˏoTiGO)y]Úb7M9ёmC}mbK1-RWn9չGo.><%0'La3 JN 61Eh7CS۹ X]N+zq%6|thy*tܞTD3.v.XH" ʤAv.5$%ep@%]O2y{<1&%ȔT(H)bGz[TODѳ6ccR&儘bG!>Pb 54ҨdPN-"ujxGޕx)^{;-diEd80S}S~zX'fE:'|YڌtUHZ !R6TBzUk㤇{k3^[l"f|t\OFKmp1Z2_6q65}`'m;WGA>g}% wW3pI-rԪ&DrXЍtCIOuCIao7J||n,5tp f$(!Rm8sx&TMWgn9O\œZ,D'Ί۸Bb6~L->x۔q!E]e: TKHsQInU)DÒ2EUN)Lb[*NŖf d8x줽K' X[#*_h預.3ހc>i *~:iU?SjlqWzp ès1d<2wioZ^c5*\I#X.ty0g<.:F„<τ.dtFc$xǛZ)þ&5٨by?ZWDD`Zǭ/ꮶl f#b5J<cn6mD9uLItŷ6 SmlµSͪ+I)](٧?QqnQmR܀`xwT$p&H]U?އ)kDl5;Upw:7•:G ?&)Yto|l)_Ayh vo!My˵QX:.kmM}TIgEC.!2XɃ;pp cG[n3rB.~iz.%{5|в C||-2d+ӭwEj!bER3ivqWb kp./(/;S-_BB)'9X-ȶې ^ Zmw -n1^`E 63X5;~c/"?E򕖤e9Ś<06CB0Y۽ܺ'29M_neq cF 8Et~HV9Nd`=rRXo";5?P0w:q"T_Cu|xw!zt·)T"45$~ll dgJiH[-1NE]+(-VO}U ƐFq=}G7FWeE'y߭GYhl]^|۠P#Mž_޳[ i T9XD˧?Ge3fדE\htB>_E(1R(c003ſN;/8\1^j?ơK6(WEncF70r`8BDW6aN$px \Q0eN1C4oS*mSö+y)ozmh&ް7 Ft14\69>/)1(հW!c4 yGwPoyh[Ք_m˜ TK/Bk*)է}Ỷ)*N!TifFJe \t}ay J _XRMT1A|AYbL58'piJ F/4!%rTC^?iH3 8HT$" Z nu ?~ܞQI'BgH|߽{YO qٿ>;O:3~ϳXjb觥6O Egb2 ^pkd}s|X\jNұu D}nCW]c\fʅ4Dz%8*/+MQPmd@c/9VDxTP_ qNh,iJ[&m& s+"@Qa%Aj A?ӢNi8Pb[`*HK e9}4j70Ա/b `JH{^#B1VւhB iycSZurk!XZ,(mf8 J,<IuSV/yaDph& WT48^H;Db^*I\|xn{y~YZ! %+(ČןUX6 ͍gv>jd|S4Ng)ʿ#B˿ߙd~;+~#hCfPP-al10B4 F'A1VABG%ʏ!q `}[rEO#` (ȖM{8y YΥ"."Y̡1xCh%hYs<g#Wϊ t-BpvADž jrbx4ܓB +S 3UOz0s4\@OY 9N*ҳ1; r@$5XߥS'I-b)5c))tm{'KZ@2}ѫohI>6 s jB4S=O-OHT$d9oAac]W 5 zCDr\Ӑ^v0C*᠆{R԰SZcpOgκj>xr^J{ G2 r d *֬Ʋ?o _V#*G,& pąB¹/1:'rٽ/sϧ[T3ds> ?b2.˸.a5Mn3fB\y! /18/p<gSɨs|yG#NK%roOep gaqɏB6\[-BLWoequE.˄h)"Q'70 E ں+$>[vͼ /gOsqfݠ₨.'$^ngWigat{_\}cw(ScVF vhFO[77h4/3Bq?{8n,/ L#DC9U6:.5{^ȞbM9R(ꝐgdJ*=ENleG*.#1ȉg`M#^PϜ>pc}V50I>Vm * T\Cګ.x.u Sgkcu0 Fp M}fP/͔Z9P`zvyb4,#*2 b՞%qIq f`Q(,λ"Y*K/׋"sè9V2 K#4`Gs1":H"˭I"mgq%d=Ic%20uUFXy\&y,bL 4Zf +La|PYW#@c) < 3$1,"`C+k95 +kjGt/0lQa`DDqi5CH&af8cQ6ǂ(l"hm-3Ar=" -,X" _ZEg *y IlE&#+0x RB3zsu1xxRXP0k)(XOP fa3;Aus)}M.#.fIΑbCF"pc4{Fzˡu qT $JB*j#Ϙ'COӲ ql$l>B[r@h+%弐cwK)`j%"U m9"JNnxTd+W-pn3(F4xN` d;˕vZq#JRHjyNe8c2-'q32v>e2ijpĆ\C<N .QoӿUpȹt٣VqI+F շPfULKJZq gSl"'ge:ͣ:Ru49Zrr%cv}mjNVpY+0TIpU+=Vq9Io,62ɿ)Ods .; ɷc +0^Y goT AQ<4Y!yH"P\N~y@mFs쇅{[G~ǵ:kHPet[$\t?.D[p–#@~*rs>Ǔ{Rub9RަDt?fX%p=)L ǵ@'&Kd^L/}yIH|a?O Fզr\ܢ4*ɸ|Áe4Mɵi2r,38C1f=ڷEsa 4wF 8 RMozIKR 緥LnT]{h~=ҔbL;o:~{g\x'F- !,nR\ N4% 1rL(e@%VIӜӖXuJH] BZ:556Ьm( CV`GӇGLυ(>|v2'Jw2ngmr8cz1=|a5 ̌ザ@ҿt\,t H2Ɉ bP^N\a4_Eh>\PeOrq{m?ŧo/lOP`7Q*F05 kwú/iWv8J?J*~.~ݟ޽;1ۏ'W4bI+:*$([N v?%ziߜ<wLyOocȏ@ nb<yOgIxw6 ~3?ͺh]wz ?[N_fZg˱u+7{r\7υoL:Sx~ `n:Dc>Χ>@(;N;x330Ӆك>{\tu dW'%XūNA&ʛ!$1o_{縟^1<'%p bL73m?.`+>} |?"L0i?sYgxx}i A߯|~'”Vſ&kԗutˉ8Qy]F?_SfD.>9F7 h4I|cҬpɤ8H 7YJĂv{ c5ř3pϑ3S%!DΕK`4 (VQeAJBuxR~kĽ&!Z7|C^DT{DVߋ8cH)S-{zC<BCH75b5IOVDH$r}$pW)M>$]+`A4n`4uLdh:!a(!H0$0M#؀yk L/(a_ܤZ0Y g]RMFe&%EAJ%mj&Fg˻mP|0+_K*qdr~N/;j m.vew:.@ {A4&$Ϛ &P K 0,#uPbRvanc2m+ּ؇$%~ĐBhボY#&[ LW% j?'I I0sL1S<BhrG qr N4riȍEn,AT҈  E*F:VVc@ U\A;P#uߞ /4K{%tSxp]-ǃyL8WyQ sJHM=0>TdX LPA{xW0V*̥̹g(CN WdMX5 H*Nׅge ح`mx@,x'7Ego/f,.\ *b(*@ x\R&J.`IE$"Qs(k:N?NN-\k*>63[6,Kb<GL[|| *\| > ~DYR}Upe/T"A K.an"⚌+TdMVI05߸`J>PXFHk*Vي65JzP\;r1dLCĜ.06ވ)F=܁nUp>GtkIc1=Vt?A 2P|⇋9r?S~M'0 +-Ӑx= FP.`;# `GE߾wNS9"jI4.Hp,ro{2Ą{ IM8'/R![ٗ pyp2=8OLGDJXfteƤ1 U5֕P<.BB1=v|cm낗-]Co[JZ:lTr-O+^s lCB K$v% Dʗt%j ޚXc SÕz* {AIlI0Z Jcj-(F2}%r͝6(Y"lCe[Qj_+#9,@{+qACf??rR|;}.)29YS)2xoa5 ) LoIa*jq7˰)Ak U!F4D5rV d|k#] $/JN6uzv4^E nll\]y:KZVcG"4E8ŷKa ѵm4Hk='FΉtF༡%kƻ& *h 
:Khp2#6A6T1K7hFB-NC5D9Es x'<6BM۫$PBVɍjiKX"5z ň:z-'-Jd 6K[ prU[ާĔ.ڥAB2 `$h`~d\ۢ'HGZVT *,Tr/kyHu@!'@9Z"e)A) ?EI / _bW|`ﶗR|^`bx¢5݈A51 hr#8ܒGgo>O 01pǏs7[~kgmuD|ﴇ::a:\Ǯe~|<Olʫ;[ocr*}.^ny{6lNA&|t9 |? sD"sD [Y#_Yg"GzCtc#ن1{zku|gwy=@зPg!ɽ1bKE3m d;T&I a8Gܥi"'6U.̓__ٌOvbro,W z_nG_̆tiaeٸbs^ FacyAհ7zl9wf_=Iȟ\D3d 4P aT]$} KTST:#䨧vϮ`&\g<_bkG,~5+[;cjZ2Vw 1WQ<*z'GEOe(HJ#z 矃md*;=wU,ru˲1THcP#sd"sH3806qvrX:rx˥.GjuuS,f7mnߖ`9ZŽ1jv}Ճgv ].s(=₹O/q<-Pz1¤eC$ZpaWۨp)eAK%jMα@`p:)g"<ɼV}dEHRw@Aģ>^K=we'V$XW@T*ig6Lf.ORVh;bFmu*ۇ^eW`݀U!'3q={mh(t==?p$ 9Pt4i}?-Nv({XITzҬ#"@Z\t ٮ2×2Dbh_TFz'f0ZF(`aKM0a'SpT|B գXJ8$M9Gw| m`H%Ћ׊/S04Հ%R2Vj }-~v~n/r1*7gjfNzE}9=^&KȈ"b}`cӁS2$kE cFAcX-@rOp&)ӂ*loh3"iGyu*hVRhHlvVYI4hDԔ#k f})rQp'loZbFcr(wV*+*`I;xu?e0TQj;©%bL$gӾ (˦.8<1rH(1<$Vx^i4_M?=m6J>KJ:F)'$j(VG2ĺ2_H.g_}[ hM: g8B6:KA:~o}Ƞ Re+ZIM=V^} :q9~vhlUҼ+WH 輩>ml3:1nMYA6A(%fi4c]8bao5 }bn {H-KUM#,ilF@8ljͅm \J9JVמ9y\P ކ!bNq(E@iS+ˁ:`ZjojAH^pJ]UK:Q(헢CT#!w.'OYLY\(gO2F@D6eꐻHx'Z#>=R2*:u]۰_t8'J 6eػaKrN9 \cΙxm:7E epURM=kJ*fSikij&>L.Thr0g 'lX%u`|fp4 VK]$fbٳ4`T.ϕ̢!heH&г75c9`lUf] GŇA|9ǃ>ӃIaeP'(k;a:! j:\Ep埥@0\i.M m$JoeP )ܖn/9-^AM8pz+}sûH8!1jUJ+*A/]}x׫MYžn}F'?Tן1G珱e@BY}@ZT[ZQSfKP2%Nv7΢bP }M4WZP!]DIP:~Ή9Bwj0rl R5ړ(eTc(߸w:kg=р~|Q~VpF 7}:Q(<~ t5^sZId%Uô-V߸񖄦,`*<"JQ mp6h**/xv)Z,`Cz޽Sʆ;,bI g̐F5xPa{:0NYR9\`wpҀ>vw |vՇLʸ |Q8C.㥇dh;"~(`}'_/P+, ayi=„NggwVr,`@Ϲ8ӿ|wg 2%I$ o #av<=U "K\\@/^ڋxu백fX<㽚]qㄭ4mX,WAFȿ@9|3I_Ggfٛ7^%=Ԭ#kR>S\&x"jL!qAXѾ'WMq<8ꐷGAtV-bt-zhbT;bTMf0+dvJ`b!ivOkڄw䭹_Ch8QP&Ci:8+o O7o<o$ y+Tڋr}C$_u9<^5Z6Bne时܇AP&&'N ;Έr\gVGrX c /g6@{ vB˲B`w2-WԄinIքJ+K;nm/<|]Mx;P(ck:e"dο֓HcL|D4%ME\oYS?rW!玒ƳxeRVǢ z z"))kSZ[;8e$8*$UiľQ(Bh.;iņm9-QUFBHyƶ0=aN($+R1N Q($'\scZ. *iᕥV7SΎoyMx4rӘ| 5*˴V x] jL+A<\ciSzv>MY;`V ż2xO7heS)$tAe,LhkqaEhvy'SJ۠T6 zyxqљ d{,`o2 B5\I%TZ1GI Rxp5|rj8N%}p#nR5u/krޏ㠸022"I[ujZ҆OSZ6 (q4!Ỹ)cpf 5@`wW3a\M BKSnDb^>Ut{3 ?xix3 Lm0׊FjqS ONHqu[d h[)ӭt]cxd鶦uAe@Kd;',OR]Um|r]ReJ΅ʑedū2w,$bM^[1F(X`{#vYUW\.KQ ֱ[۲زR+Bu|[cr׈flC3]6X4 :|D*et\G>|jhx0cR! cUЫMw]j_Wp(ok95:c5  W˦,`׎p+]BU tk㏙N*+rZ{cR.XXzR[% ۠!f,r%ʴ,rKփ*|*(t89(JI[u^+zmJ|t)ZJnk􀞊Җ.j ( cuyJ|ԃ-v_h,`/H7wʫHe)y7Jdΐ = 7/۾7luCGBiqܽEF/On <ײX%i<AHIJXqD @DƊxm("",cFL)36mv0.\=ZpxU+f5\P 1+ZBy{%$k2j#GRڕW@Oc.jgV*O |C$%=D@BW™!-9꾈 🴽+֓d$c1ff-eT@Nnd$[`f?؝* &韨 jL2ICk@>E:;g!m ZW7;K +q/0B+NВ|ujv{-/5[͙~Sdi*K"Z $Ze!NYox~ܬr'hpXŪ BE@#:q 'OpNyˏ hBdw0c@ƒ_dp L J" F.Tà8O2)bB] au\B8:*d8uBӳFA^N.ڝM'sMICo0Mta%=p ";!-`v?=}msPj 80Ǵ7=pi歎E G"DTc(7h!­F]LARk}jF0^X(u⎺VB+P6ؾ]\QP.|/lKIo?+%16g9ABi(^PĹӐ7_"Cޚ-@H<[xH'c=M0z\{Rb$Yq4I-n4&Jî)e4HpY0"-5[^;X-m@F{c%R>3"w`%1FhjQ"z AUK)N˴-jqG] R*M8Gdz2 Z!#8_Ɍ|Nx cz^ǤV=ma"a0(|j) iNsC)8\rFJe0 Yo,$z #Ky]@. wp(!fT4=`s#ƨ F2lq!" kLC(J&YF CLX7YSޣ GYx -(jD 'þ{\* 1tv5r ^"D킶BE렡Mä04g%5WV1qLhIL 8*`9>;ԓ/`A@G6= pBk&v\@t'=[/-yIToܷΤhs-9hf)%l;=VPqHO041J3˜'u& 8nd["i ]#v-k"1ō4@BQ c-oqDᴈ_v_ܑW>b\,fƫƥw%:Ku]oln$~y:fL |ScBe#."+]k0 8rL7( 7$L4S*fA[ZK4A+(7(ÑNiMИ$q.όf&a:Q"e($1kږ%LSiUʬ(CB8aҲ2L"@{s R,9\pB8ǥR su"PIM "{+R0K, bͮ^ɠ, /{R^K=^{ 9 7C2hvZz(ZWAK`DiA@T+H^x`R5Gj794 KfFr ޏҀ4FimTfTJĀ%*A#/O5|??`O X 'rL3e2  ?uƗ06/td|w7wOSj\҉;38 e?mnk72Хh.Xyf}o{gk7;{Swowݫ;7?D3Gz{nn 3 t/? O<2x?O{ݟL/۹u2s?l?6W_7s~%*#t}yNWx x? |~ *\<o\O\f<.(^?_Ÿt;|Nǧa3|L? 1Ltީ?`|22y9۝O#`wT? W/NvXu\uɸ[磻t1xr::` r{9A3 <\q>E7S2`c^!c6~nu؞5Q{ݏ}v6 >ds]689MQ XP`0 Iwyc ։fdlxta ϯ|vA3Yy~z;s[wío GoNNNf~)g?Iϓiҭ ęLj6S~ˁ\.{N'7dnF[cus_7s~;t~O0;Z?ҧ ufHεvHZ-Ύ/pOG3qФ )x"tҝrDi0OlU6zv)_sK!^'|̟-AV R_G-91 GQtufOM?\ٙG#8#}#'ըW~ߘML߸K흒۫Jȸ.ljH)󮧌=P/YJ|.SBgܗҧtTaAQi`^:DbJXDipRE'&kv~O>tgڧ]k[_WhBB*ǵgr϶F;NLLD&cAZ3F WڪbӺf8n緯Ait+[c#6? 
`[o(7FEث*eYb; S2r2-rw|?[IeTX#D(P8$ A)D KH xq$<昒K~(B(%% rmv,KN"VE4"^abkȀtH&:oHtHtHwGܲ%)Q]%l1&tPS-⃐AR$L)s2OO:$=O+/@'Ѿ1GF}^Ż_ﺀ6d3Ep|.b#d Z}t.v$⡃:Y5Ś;['#ns&=jY (vUv>I*[qK19Aλ؍8~Y>Cb9Od=uXkP?Šq1CJg!bLguAm -ߋ; n8"<г=hoP۾(C~u}Qx;}Qܙ&^ 9Q_j2^w;ټ(Ce;kԃsMA?>IV>#|tw\a9J@ʁgd?wϟpFtP42Vw8p7^VpwQeOq<.A2ڃ!=@aJg;H\EG4UP!b]!NEi+c S\v5%TMyΦq}f?>k=b8#`/fB }cؗ@$RP֠ Z˪95p;.'yBXJ BY J[vbjD cyK )k3T7 `ASޣƩu"ȲrmjS5ѢJ+- xK|lzz/JJTtMBjCQTQ$.u&E$[!Y.]ko71s>+YևhXA[khPM/]]Aoұ1EXAo5yŘҭgRaGB~meT"+ %ﺭEl1˪VXV@ ɬO$ux(jV+n7mUQ˧=o>{qg7'x<7mG9:zoB}_(m?G͙VG|pE}G(Sǣ@uCZO1"\IOe^l.U|\+xB\,+$X1D5ߑ>F&H{Y׏O)#|#c).zhs`_\ۛ~x^xut~<}}uuoV i 鳓|ޜ\˓o]P{~*ŹOd-J)ued:nymYll*~Zɶ{|ߗ\]'ʜbi찵{f_x/+#KQJ,JAW4.\PLN{lZ'>닣˸ !wˮ!˭~kiO*ϵ\>WG lH%Tu| ]v r&9\¡wA*)vrƗ"eBR Jʄ9A[j6=cZFib]QN׹e:zv]hG6EV8fЖ;EaϘ2ӗR9X]1&9aв:+9gAbh bWT".sf=&rf?6ꑔ0%5|$% (T;4Z|+nj8?`{H_|*A+qlI9F j7%ʼnbmWPρ ig(^ۅ"3>)񜌏k):ZaR* ҄㙅qy$B{^p?􉅎B{~e26%Y,RJTMUd98zDʼnx3U1GD '1)[cLX,yҌ)9ht~?{Wbݯo_Y,:Z28ۚVR JcuJ|Y1B2hEN 5;jYoLטD9Rv`kRYϘg{0ޟsEV94y7B~hɊ] :3b58vUz!|(d!-|Z.Y*!gaFzLYYƯބt@VZNݫWm*Vڙ;s \'^'(\1YM mlTժY",Xk&&u`0Ld=cN1}ZHtRξ]G[m5A !*pe7gLYrxaϘ̜wqWޑͦ˔}bk:Dh/\. G[A7CJc)%, $Ȫ7Jde# XY{aO#oy"OoUH꽼2e/"$/Zn}Ly @r̵FN^/ſ/?ֻ Ehm{{H)puka2(Ӻϴz&| ԋ#ܽA4jkgɻUŅ7^-kn8P ==Yx/nP[5a`9L`+$۬8fϐ4gKzt!eG`*=ܙYڰI'قJGo(`v:&(`rIG5J) 61{%:l,uH0FU:(#Jz;K5?k(1رo(XrJTZhR~{_51z'&܌C%&~T" T2Z{9hey+-z3e%6 [4,6tMΠʥxwnJTPq [t%u,ebJ֏_g GaPqdCc`*czaRxX.;(a)ܹ)iS8Ov o3#rHXb3d –Et4{QWi&V< JlŁo-J@h+,Ԝx+ǔj%?Cp [W #vdH`O["$¢$~kKȊqm) ),/EoZjJR-猲?M9^`\coC>i3E|/7ŧOl|!-أ Ğuo $|ʩП-z0jcgrgdv :8SMև%l P@bX>iDJCe:a@-M@^KP/.t(n0`;im1mK( 1GP FY_L}6eAmv#! d(c2 d i&u,H.:Srtp&`Vowo&R[Xƪ,Es;qf`:0DD9RØZb=,𣐜QU,LH3'2[~ElP݃*Q   >znq&u2+ &-UNd@3Q;, hW,m 9=vID3 MH!WW}TÕY݃$(:fVz%,R騪_M}#">`ieL UQӴٺ)!x 2"F+.@oJ>%&ʲ җǟQ$ij╮㠯}eP_/H¦w`B-e5蝒UVK4%&ǥI.@4*nl JrSXku2do"2}FQ{d)HS$x8& +s~9L fBAAueMp~ ɐC}r$Ex,OBLTDr%*20&$R?R!^ޟ.29sd*Lo͐b*8&=g/y/LpI+J PiBeJiBb$hJ톈),BTpe=6JG90 yj-R㷄Jf f+mL^5v|XϻwϣVլ.W[>Q``o47P=>jT> jD=ǷZSc(xF(؄ccK1H)Q0+KVZm{&cD񁺔umC 6uB>\@jgdYj|1nO_p9@' ImQeJYNLPJ"꣓뗱G}sRHH#|@/lەjIvQ!mRj4D-[UH%-w;{%/ ŧҝ<`*Bb)=NbOe rMwYTJʴ)KXR*A1!kUJVP\)o۪1fP):ާ 1Ճ]9<(of[aEW¨%JUr j"&' F\QT= Ru u'r~]@}BJY?i}jߦ)Hb%!D. Iš$gF%XT0p)Kʖ(HH 9wI,+iQ,t * HP0iRkW.2;58hmV?#BZtqEoAd1&B5P\VR%#[VmˎQ54'O 4a<,pFsͽFIځbJcb#)h)ȶlT)iָ`q SD ۅ@>SKMP4h 1BʙMf2 7U/aOo+6KUBTrkՑKMvgL/LI@ݡZ|ؑa Q܆ZM ?-s򞻭š(* ̵jtWX_f~W$PM- ȎzmwD(yZJqȦ.QK@%TUm{fzfHk "d(מ.0,fJETrߤUCDEh^ַwx;sӱ0ksuoB:m9"MwTH!xIm6/?5x 9ݝqeαAxL,M2r uvYa>fg"~Nsh+56xU x8$½\SMM9%ˁa31i/W}7 GT(t*J ar^s*ZkP(ˀ0 !A|b&<7^d(Rlg БeSBr`%sV 829}0`-"Qn2yҶ ~A\Br˟+hs .ڦ.h2u_FX=,beyg4XXn_8ȫs:#^_&/3 t$yobl_݊դ?0WX)3W'M25iO њDgI8 x$249r*]DB\)5:/xmz;{V}h%Z>V\ !8:\xئ?) y/"}VDd)WgpRe8̐pK~%>SFS5/7W--?^/2糷ձJ!T+3N*,FBX}>z;J_Eˊ=%<ߍX* g8K_/6Pnem(i/{hZ鿷>R1R{]?2@h/b?ۥjMmAyްɚqS}yE&DM:MEwɇА\E) |(AmOcHa%9(d{Ẏ/ɡ}WotQK h;7?NէȚN7:w!^o0I|s]ěě1zNdkŋ= [5}}KOMLj^mׄŻh)gow TՃYQBx7'/C8ᅢ]_O^_r{)A^2}eNQ2z6ДEb}5 H2Ճuo"ƬyVWKIe]`ϲ:la<Tm>9=ٱ8ޡ_WӃnus(w`HأB˻Ųj/6.H# ć%* ˧|V\0r-Ť$O$-zWӀv\k5QA' L8$`[MXrěw΍&Z٘Rf1sԅ֭|w>Zȅ zp] idp]]OwN͹7K*>D@=HF&d*auhjCx4KMarQPr\kLL?e<$Jɵ `$xGJ]IG-w'Dkf6z?Ny5\2S>-+2[9ZF٨d,kq8HQi!WaWQi 쁊JWت'+wozp DQIҚ1ɓ,LYe"H)%&r .@(c3b'^;Y8:3:RX%?HØJ 4JYd<RSΕu!S"i1KE}%W#4igRv$31,3Q :m4H؄NS5?jյ Qd;. 
PΰLu_˴'Tn"q2 ˴f[s9gc?aZuc%cgzƑ_IevU #h`;9^v֠(v]].g},RRY%Ȓ"㣜J\Tf hy>QiaֹoDdDTw$@O#%V1 &^ O|*dOP TAa+ )뼲XƽLyX[*Gy  aS%սp_׃"n[E׳"iS /ܧ~ˮQI8݀C΁ң!>c\p/W (3*g104OKc2A1l5g%t 5Je 뙔J+D gڀ,,+}*C_h,L F&_PkmYN iZUliM˪رc|%-ɃDqg6%d%r3H9A8e&捣Ϋ6^ޭ/ֈ5w7E_$c}={p]QAMj ◽xk8$ś]L}u&5I2'SpߠϦ]7kvxKkL:]Wzvm`x85 ӓ)n~܃)@9}Z[B1^  s]תMm -j8Dϗ_c 4d_xm/95[sBjAvAb@LEީ^> TH^+Z=ŕ IY~Ț"ZKV/ek /X1.TdK-AeIaZ8Mj4 |ЍG^L<]6w1(pwBZ3zj&O,:Qu ݸ] d]sSwܸX&@IEkWH7gI454j1HFP/1&c|TA(s Y8%a.ӳJyQ 5_7a_}s**JZs'kF;O+p mҾ$Nw]zWN51ApBkg8~˳ſ co1}>BADp~1Јs >T!.33Cs<Ec0R5 BI8х`õ)7¨z}ӆvG+'wdS(٨9t #d `>>FI58vv\ysǕB=,W?/ S'!5??(C;Z!U]SkijjVP6` ՞)e2ƠyYZMLHg6ѝ<˵iY 7Tr}@|+!r-bčMMp>e{i-URjXEm(̀\{WFa08 :\T-?i&5] Q7F/օ)+wDsE@S e7 ʹ`9hm팎MM@UQetZȸaSn?lK&RE3eb kVST՜6/QmccҼ7T 䆧3yót?-\`'疜sqs.û8G?hCs LRd/.dbtB QJj.(|wVB MHǨkNiIw2khʼnK>vۋԸ,ӽ X_}1me5 b"##%Wݏ[ϊcb"Oʍ~1it}wY0>Nnn?.򃓊Qa6s`..gm׆`q 6%BR&'>}/l~˂,#OwB5.v*~Tgv=NdZEkFSiaC YpFcZ nśowCXzo>VRk͎:x'~G{Pp\bkA7c/G3僿Vk)S_g\1a,3?ɴSƔTtdE`~1[MR)r^Inn{[L?>߷* ضX0{|0,u=v 9rtF22pegEF%b]+U>Wi 2( d RQs:bg 1:]wWƧ@sG }Ǩ>L0Lj)R|yL8;Vr"{%" Cr ƃS<|?xs?YK fy/;W ;KudY{ə@HǾʓ Zћ=MDVD GÍID1W*"0SXP"ˏ}vz$1&yjˏևT~ccu2.q2ɗ1JFE:-=xPD@Lq1x.kqΰ[e9 P#1"R/7}6z3[#v<2v0I]VGQc8sZ}>d#v3 '??_釿ng侜O'?}ViNWTʰ k}| E;l~GV>'Sq6ޕs1t+[(#yQ\[ ̈́py˅|a:b/ӊIssw(E8 3?s h׉e{ޡJ Mh5)wsU}XJ¾.*hZ=MlɴO6Z߿BnZ)#H tPnTin / ' $~3I|Oy}];7gd!CYL雓xvc>j_ݿB:&("RRX YH s@s-ln3!0WH'T+e/Vzܟ0; 6 =ySā"vF="TG(HVc FB_ce8` ?:Ɗi%1P!S4?J # SpNrF0Ҡ-y,T`B2k*D8~WSBԯ8%˘*( 1˜<$@ea̋ǎՎgLg`4dw r.2 s^εѬaVd!Ӥ|(/w|&ݕ~6?`r/ixjx\>nQ(R2|\R),pn iRl!FKK=W0[ɻ0캨RY/z;in'`NÚPL0|cY{Y8*̊/ƀ~ҎxS𸄽:U)`/^RJeD-[?^xg4!{>,T2 &ub90joM4V !3Ǐ`^j!oa/..6#uOsd]KÁ4}dPۢi1*gKRJmW泟M|t1;t!8\lcSn<)zNTQ5 $%3KePצ;θ/xO-e_5vlR(h;uE+ 0)Q}tK?nxB(iƒJ(Y\J`J%3׹%̑/yI94KI%JZhYVՊAb))>\h\Xۨt͚0f5" P$( D=7aM]!jWNj1oj6j6{|߿yթ?TW=5 R ߾i$1-mg`~B}WcHܒg ^;V^#7it)>aU?l6 d; pH7\ Z3詾ո2rT+Ml9)+,XY#AEVl`pR 2LxssV1%׍;Ha`KA- ˰zf~KvL-D޺[o!~8c5l˛esώЁ$JxK_t(M.Ȅge(|f$LB9Ϲmx͂u2n焑LAPHuxH@$0 Sڙŋ /^P"_V/aZr֙l1jIL SLQ^)Men  @r/1m-G#YN&Tl4 ٯb%1H^ {1QkY!Qp@%y1oNENMeJ8=,1˥&;7wCeċN_\1qa<9a׆LKԅJx:}VwmJh!=ǔqI$Ӵ$i;A0l)俿 heaDE],8 $KfQZoݛ̆w"_shI?:;al$M$\7\q|k45$mDȮiju1Xut)- t:~{AX'IcaYjXEaNQp"e'Ffb\ۂޢ".F eT͝?]%CFaR-!!\9F]B5)O@$L9!WSLԠD 1dt!2>D!Y%&#3IH\GGG#P2́{uNѹkx5=YL[nãܔkNO} }CcTIzVc[< jZ:X$$b$Q,8ıBFrj0~9UKpݝi|woW+^ e04K^(AYSZQSqBXg/wރGari罛:5?ݵ&|1VO|ˁ A2٭r9ڌ Ȇ+#O S M 0da "֌*\uootV=1Zh!_w}dG(y6&Ϊy=oRyt䆤h1k!XiBͱAʦÈq=¼E@(r$-D#Ԇ0`Lj.x ⋪L,inڛS4 :So^SBQO1̮'7G`Vt\ߑCsDӁku$C`tZ玩l# >-VO|^+Z)v8}ڒҀuzMeoK\ "[^yD|Rcc1Eb"b] Si\x-|Uk@8[ :P7_%P.tPok-lΗ#$6M6%蹤o)?ny5.@|iyw ٟnJ(]Q7$b{05DaTS]V Df;*IR"U1QdR"aij d:JTV y8r:_z((\* l,FkvVGCav1ݬe>?VYTu<0-$ߺ<ޤv[n$'77UЀd[贺жwI Ƃ >Qm`MMCۥXH}#U$,{+7K G Fp zZQ|lrD} Q]8 T6C;q ׊O VհМSLxʦu($oz<j޴Q7nfUhhQzgB66Pmi7#Xؤv-HMZ#1uo-f Z+~r-}߳q.= !<  dH85ng!F"0_aT֫a>rUeT4p]x+ buR(m&u->̤JX*LJPQlLB$KI HKFr%0m̤:!$I0PԣH(G̬[\FM~Y$C GB<ˑ$E2a(N84rf|`D iBc/˵rf`KhM@!t;iwu֭ƝĊbK:._PgFNtɒ+//S; EX s̴P))CZpٷ'&9@c+w V}r9ٵD &F46Bs'6hIbCUD",B 4fR`Il0w,M;ac?P]nA,[igI _ fE#j/ xi2Ry &zt1s!mdjgM̽`e'xu*c33=I(ʬ"0 %7r*FRJB V`fh #Xqhxq0hTh1p:1>D|,2~0Uäx{! 
yj=B}ˤ+C-^xF=={[~c>L\xG h /$@˴'] Mil=?:ßo&goF^ώ}K l,ZD~ByI业ᠫ~%56W}!pq to;OFGж,n2;#`i':ndY\Lx>i6&SsxՅιw@OGv4}r}ys]~bU~yv n&G(W>߼} sn3W2a*kNv4F`T~sJޅ톟.tXx[ǓXVp'Z7j;:]>yu?dj8  A0:yUY)=Sn'd ˭zʃ-݋ /6_w*a>Nÿs_85o_}{bu:A+v5O6S74'o$ӝb*yvIv7@ѷa2.oA1>qswy3E~ i*K=C7@`3qvgsafO/_G.wE=e_cхOx>|T:}$N{ |뗟74dH`.*;}4/XZo::~=MCl[?yE/珽kQ˳ E=f3lq2=16FW1?&{_wL n}& ۷ D0zhsD| y4ק0+SgP]БbA[3;A:Hmj2H#c)K;,UL?;go9l֘Oq\:=aϊ|zj~rl}N!{GT1Jk * knwkW-J.}UH^RS x$~z(M;op{nC{_ҶPpkf)lI~U̬)D !ЈIF&:rRaH-1O{ $l J I11cLZa ')U*, "+nSӆlN?vBbBAcJ""$iL#nS0TbSiT1 qBX2#FiaLBK-5 QeP&QP+V9Gc ם,W`·n}zn^^7zX;2PR-4,2Z 81 Y bG q 7)4F1%p 70p[J_^{T`ez}傕L*h=4KK yA(MɳU>&Q2`TB+RJ#^o/{i G9Tiy~#/<%BHz9Q.)HGs=Ĥ DqzeOxy'O*IJ8p#u8sekQ$\{6g6睑B#%.ryǼctĶ^BPX!xi)@<}4 LIh D'u[TdESM!z8k9ڇ氙؁]Qpt17B/l7R5C))1c廷8uZcyH ANtE(#H(tcヌGG}'8Iv\EKd;xQH%@͑H*$^ՊqhW%`IEp# 5zkM%ÕHckXayHzKzN+ ͤ HD#D%+YI)y'9e|VF`eƥIͱ0*Cd$(I@`77 4ea]6Fk*$!¬w/W+eBR r$Jc.ߟ핮&/!sh.BnD Cˁ! \-뫏Ye\_uk^, ΅Yr=Գ/n_Ë<5^`Bf렕GOd<;T(ZCy qyrru9 ~~s|&nU7^(Ů= 03N6t3{g*"7]{?TGOdLrqxwѓkdLQ(ܴ8-.զ9!$G{cB8ƻ I# X'S BCno6d?\c6_l1ܧ`  J|T9=C(csL<Pc޳%0=s/CYphEP)K)v@G=v5YJ\`FhIw88@d5\5wFls8OaWbcw^y7j5 Q th襻7f)CۊUЋyry>Tt#JzؙSb;ϓPNk##+x=TSET;.~a &p!妶w"xqz}pvm7f?YP\廯^`/×kZ-1RMKRO7I#ѶwwdX4kYڡ7ۦR'k-S[ ɘN=e2yX׿~|U;>g"c|9˩]k鋎o?AIZev13[I0W.hMj do쭢m[wVىavE=7vw CI#w$ZJvf\|h[Y5lrm3ЏļQAV1^n!HfU=971TfzR2w|0dK/M#A5J>~ςtZP4 HPіDP&^ S R8 B g%y7߹.;J_}w}y<(1F?lqGHNB; 4(10(PQ~ f#†J /ĠݏN(IGe]Qaj53~p BN=*,zQ& 1-&2U裞ξOlP*t5w?ԔK)5THBjA0ʚ3El'TrZ@=Wɍvs{=͵-Sޡ?0R/@zbί~7m-Ö_l\9mRÛ~)UZ1G%o}fh/>̂+=E L؜O?צC99uofeT6__D{j)Z\[f,]o95wݹ=ͻ'_:)iCD%)jYYPi\K}(.̚hERPbwBc8y:80 y4t _4骹un|{uy_7+ no!xh74 ;iPx]Y+Pb,"-a wYC2FXtl O2E<,aIfqE|0;wm)γ4ݞ,[_2[ X{\t2=E[lrǶw7wl!}@P%:Wh !zhN6 6]W66{y^ؘCik} kGVMzK`aOAQF4? E=d7 k7AbR>  ENY8tN(jFwR'1 ^D B0J.V ,SL:@~ [F-wDA{O*ޑpcDCD \` -OCk(UӃ|;}ya,Nk˜\`N}B#L00á9Ў$R^+#攐C;J- ~@,i8YzhyJ|NZnvއAr(j&qFNYSʫ?롓!zi]Dd?&.g?x\^cHIͨIM)y8fan/fK=/_>[t{l G&Fi@@4J]cx*֔)bkU<. &\WQ8BWC= }^#⺓obt10 )UEFUKQ1b{RBKӜ7¡*hfvJXgEW n*R$Ww~^ƍ;7;kSDnmR/V"f9S`qwpMW%]Y쀌 Rj$*R%pWDVG3yHp,L⦲77q١F7|gyROO陳7Kcc*iTvDTɊCLw5f60b=UB(N@cR(I$A`kmF`ϞyC0H8yI`MX,)L]?dKZ7IN%#YŪb:XD(ߝP-'KPA. () ^!J?j[%Ubnđ#T#`ٚMB @k} m+rI ]3@:8gj99U_ZEXaJcf4SXKmR`\쓚~͡8Gm< 2{{[ }toB8?;UEepHI|`;}W U̕}X ba L+L(w8|Zeh.Ei"tLmjcFb,qj8ȭ {s8pjpQXن2x+(Xv ?_$; k?7k)C䢶 Ꚓ|l#h\% [QGQo:Nl*Ydrď&yT0%;PY1}z}-4Wea PՎ.im=A"#si˼R=q* w?>W6DL|H:2؇-z#WSGxk-bXt:x7WW#&9v_2p(H~@|0= wzQ˲?Y*@`f%`7rexkŷ '2P)>壌L=}ȇ>}x)!y?D"$hȴ4 JT"[iF4g2߫9RcJDXb.Kșy5ѷhAĄLgd4p_v>e)IJ%w2jKN% 7XhRc;Wy$YmF "RzP\a$ *a.M `scPlU.Cd/NAskg8QMZ\nކrG]2X4@V{V6/R"yԈ$8S{qAAq5lǷ%yV}_ʟDP ]=) E\ ִ-Lkx9bJ ;);!~;C.Da$k B& xq?.5ׅƎ֥XBu TBahwQQ nuBH0(k5;g0s:~Y8":7~3.vaY{;4]D6p!ʫƭs"C,V[<.)od= ՖtL>$~PꬋUT=|`X2;b@Q5]*pl$ŁF"?j#[MD0A0yYa?'僘(6+xx\zYgz5^Z2CZjZ5d IQ*P)HDQ 8Fxy0jU E`k.XJErKB0Md V/fԭQȈhaлU΂h4?f{FG}~ XGfԪzMI { Q~!7(eb!%TW% }ߢTY~2sM7 &{&uGo:p3'nlA^?;|!?;7H&R@q[EpdM@ $PZ0JR88ssp{M&n~Y+}M+hΧ$r&|͞f7왑7п4p<uY}B}Ubj&)znU@^ vB hLRAXYi)hJSUU%XC P$Y*4$a$Bx*+2lѺZ|깹hnzSuNOJiH˘bB+EDCRU~KR>ک{핧o>qy4 Wh#zp2ˡ3l_p摯<7:#݊634[o=mxi?lJ|@>XcsIuO2 /xU-]L-pXDXzD[PF3]Z{9V@)&RB|1 &nQ2'THOŝH4S \N7V;%Ȱ*hK2Pb4ѐ섄Q@9aX aDE⛐8YklD AmX %c/ۉpf'v:J/>shO=%WECc Ogƕ /hrwETkZ{;snȫ5nѪ׫h֮x[-(<ߖ 9"p=(Y)rZlK7cK$dTcÎ;28jf °}8\^ʑ:Յ$IjNniu[ Q~\&FR-A&$NmI l;~Ul|!HO:3>Ց/Nj}/?ٷB̄#;t傫)η!qwL=o@,0; ѧ]9n2-Q0 y{>qp3PwK0;>·Cw'pm 1=z>8ξdS=_릒! %lE P.%wFg7rkt\D۞[q'o(K˲yax <Rt S01iXB0$ZX˄OĊ@+CUT31[)&C ip]}Rt @ ruz<<֙#unF u3^dH9Ā#'~p&lOڶ"(F#>s+)?XBIW#ΛJ$W=>JV4SKO)E9%l=K '1 O„4S+|16|o{ ;=UOg /9?-!&R(|eۖ$M`A.Ylg&G w;jX T~dMP^T y]^&E3kvHݾ]YQ$rb/;.m4BB,f!\enuM 4Z=7Ɨ:EBJɟ>p?aQmג2ᠴLZ;k&!  
=@$ܷ Pu۫xa _+e;ר?_fkL7|+Ŝ<}y6:h4{8zA+(O\Q 0hF_L ?3L;kV z6!(!|pno >NhQç{t<|rk)r;aGhS,'$" F7I 'E8dQN|ZY 3 l CQy4/WT@ͩ*%isiifg:p3DL&\*5VA0yEE:U\@Yb$ds-wUz @B?C'-fJֿc^xw9ZK 㐑)dGZJE` !T׻0%q  "d1 #+^ɭq!7;U3)cu$,PHѢ,܂{qV~ʮ vC5tACf=qD/lrdډӠRvg/G; ɑ"nگ+b:Hؠw2f}Mx r=/7s>"}KLy1\Kf=guW_P:vۯENçRqkvA0HB!"u~a *:A(> ⥆a]o'6akw7ĎɅ7KG nH|.ikmSFp^ˆQZ$]U;*8/a,l h~Ζ'nJ9bGX.z(|Dwsa7" }]*>N.Hr‘7HeD@}-.vSďEe_. u Mľll'{2I$yO$YYP L$Ж˄hf DhN"R*p`0}&ߛn2euZ-nADqܭ>&f=3=C)oyP=6gzuOHo+$0V2 ԉ"7C%H *e4T(HFH mh+"EwZ1: D ͤ @la%Nk I)Jd@ X5 \[N%C$Wv y9F Α 9:Ԝ} '&gg kᇲlAq_eYqM9 i|60L &P %<($h\YZRLY/Z. BF`gg,QIӾk)#*eR7 NYaj?IRIݒ\`=c({`sh7^@>L8#MC卶8>6,ixU 0t6LT$JF,DK~[fbGtІ@P R< Q]*N/,9Ľ>V}j?{"HF Gsʾdm%ٻ̥^:sF*?ٔsSh>ǝɤ2SMsIR5, .J$Ort8ޗ_; Vqp4Y^]JfדqzzC+^zM<F@4%W뱩>G~J%S֐8ɨ45yB҂a&1*WYS$+SQT@jqo ? ~̋Y>/CnS7~ul_>2.]9F5= QdD4K-pE,IqN9Ib* ZQ+9[ _02`EQZD%6'Ԛ \g R$1( tudY;PT0E!_ׄkB0N)HH iB+JʔX"oN[JyGd-z^j=>z[ p8E~n{<}mFF *᭒]w@CPĶ o ?->"]UV#^bI0#PHpRq+}X>y?lnI't܇(y+uR6 bKVc{,թPu׭qmoӭ5iυ0jbOc.R= P `9M: 4T5`ݺUB2OwGW el4,PehتAM P`vVZ$Lt)$&EC/lЁf1M tz`hB;P=f.p4F0Ɯn,U`,-zʹȘ2%%.h Ht sjke⏜ufM I)RûWj3𭺾߬+u 1x*qr"z@ZZ9O  b.7GKD4 y|x*%pڛQ5+@5X.N&-(m:5a8}J'c7%/>BkY]bkX*ؐ (y-BSM$ HE<.lnV9y"d2gE$/f޾Bxe^d>tVrvepԦ8jh~JYzѲ_`A 8INR~wwC^LyAVJО#-cEiֹlFa׊p縌QHbf9t. p2KF@MXȆ%a:#C#%z [> BB`\WuH1s]s\i"*)0R忬BIBІEE/>i#@e}Q>ea::>Et`=eF8-8ST u]}n5k\d4MR繒FXnspH%#VXBh֭%HE 'J3kІ#B$VTQKuTsݚH["Q4|'4忷W7f>*CW x[}bl*CBzSѬV00)n b;K!H0uBe~6:;8#W2`hlaA 3;5\_Ge~(uHҜ$ʕblK8$0T9ܝe _9{|K< g8~묳H3[53?{; 9?tϰlzqB$Y #4]fx#>jq<3?`+Z%==4mCLW~b|>W%zAli{֋Ai/nwӭ5@ dT` /;eu `";'gm mGVoIc!b=B[ SGwith뵌n}+KA0T4,@gZr75K'BIUmVo8PBtw}"X3B (A*cvCAJ@&!{#L 0O??`4a"ZC\S< ̀M2\g -/ Bhf] "&6]Ibq,OzY5uLayƆ P E:֚*/>&?ETQ9hu|%{]AG*ltXD{qSgBTXjqZ**7bu(^*qZ䥆l$W$ Ô1SQ AWzWt="w2꨺4̄B[шiVwܬ&U;'^Ty z֘Џ'{{Zҵk;dHڹCJu-ތuNwx`FVYMN3-;8mlj3a=.vctsǧI6@ʶ1oZgľcYsנWx(m<_t7ۜVj;/5vҩziC$rMy_-_e5q{];41T.Zj!gEcُYemԡuľ#ĺVOm[dOVG9.է*hX7uľ#ĺVO4@k->ӺCμ)vy`(thb1:n-r6y[4uy=O1L`b2 9:CI;RcN-uǽ3YpSA7@˳m'L_"b;N-#[)^D&89!9;%bN} -v;y^oȜS׉g߯vL6ϑW<~sA9;ǦxH3&ZJ|q90pVݱ \uA ^/ٙtPq7 عḧ\|3,qaFm!%}IY|FCk:OMEzpVwnàv/V T5C{@$ 5 |Xr?/ㄤ x7}4<Φ׮N8j4Y^]Jfדqzz%tzWR\+DVnMݻ>ilˑj9~2Tt1N9醳(O p,ϧͯt̹2cHIu1sgɫϱG.\Žˏu!7B24WFeI*@+DAx&d!2 W8MxRUo_C<*$of2x2q<1frJL9֒J'SQ8;DVs_uhVujV(Vhh,QR@ 5iln *<DsA)Vz o#=hc8YvA.f!1x @tk D1iCQLsʔV[g,N(M&HU=4Bd”VhWJ5B >"[9\eifئ#\?U0N2 3H(A; YR*,? fR1Ui ٖ,/` OYYIidH&9,ſw:L%.̲((8d Zg)\\fp Rn4+(PN,ש&ʭ6jиnCe"*8QJt*-v80* Py 5-;R hsQ11R';^_P_3]iUσa/+O ~smӃi=>XRo-Ͽz*ƈݔwpŅ8rH*gۻ+XgŪTv}ƌPTD~xTp:bWI2_|r'8IHU$@+<rUrGt d<Ak\Rw*7%k-q6;G+f%<_{t3-a>hsE33 L%[|DZ`%n2TʇN@jh8CY\J'D 8vʉOOdk<ڣfs$bedvHV'pCɋN!e+N J4O7.[_v5ٵ{ܬڑE5J?#-O\VGyqG7dPv2~}4m0狼@3>u*ƏC'iD VQh_RcR(E9dUBJ ན,Cc_t3X ubk6d]>ߌg/?.k|!X ?( xdQi/b:qfyv,^ba THFlP o$RXk6,M@)#ݲvvyMԉ‹RpP4*0"pgc7nWçӓQm6^yʹT=}B$`,U{^yiE5hJb˜`LtbrO.a{+bu!_ԁ?lhBB[{6@@qM m*kh:YO3$%{6kqFEEŀI 8mNd:hfGg-n-{[-=UHe%wA"D+KdlI)rrYɍb{rG 'zbLgB0Zx3%g/J:L}ytnK b4=<,T4v iX V=Ƴs!wWlvӳ$ya:-tPg^ ,rnU)O˜fLQFE&f(-a"@PX?CxlVO0]${Y*X`LW|Pn̓٩7PIJ;}R'^Pmdh?M46$ǔ_WH1B(;KifW0ssX+% uRQji`[&\mh#)bEd:cbX,Ru(X)ELÔe0 Fv'raYdrRbx/SHNd*.gtda`^ND# q%uѵTV QrIP4˜i?UM YVBj\Zn8{gAa$ ы#@_[SB;ʨ)ue/l'U kޱdW 9R Ӻgo=9D@3?=z`X5[Gnx=htKG Tn,Fv~5gxx6XCMt؄,׊@3Z,/l`CݓƠ` n~'2Ǔ rc sיHF9%iD,lnd zfrjޱ:B|]$.AQ#sE1N%?=@`=/TďKG/7R[/+!UDrD.9rأ=WN(cRRK4'BX檴R"(5R-We긊BG‘ !t欯2 ե0rڄT*,l8+ K㮖hJK6F*|’2Ho猳ּ;1ǡ1ePuLҸg 6-&jɣSR58gȅ&]'jkj36O,2} Q%dl^RzI%Ew di+kSo {+AI!G+EB\tHs.tr*Q/tÔ HJI)#\ZwzKv1vd33k7M'zg'0q)8=8]<;h+41vrw_+_kг$HRϞSҫԦ5@ =q⢒$ظLذ˳ze3g$X)L#UHǙ#\Z'VEE7xIh7yx1".bD\ CAZ !IsN=% LQΥ"¢By\qFcqz<t3MOjG{N*h= G*dr<Ւ⃽Et{s١Ió厥K!W6Spx)Nk;DtzHy v{ =[jJf8K\]xǥnhnwon'RV$. 
XBQ XEppS-{znp>̿)J);?}xEks[2N-QBzwljC?+~5fxZJ{]lXE 6կ{m߫eQŦ4|Bzuk~^b墩OKhLI~۾*< W]N^u҆%ZEqʎ9s>ħDc僢QQWJRU` 1']޸nv߂Z`|n}>/HW ɿL\CKj^uѫ9RW| iE1i[KAXݏ*e5\u3a$|v?Ήn]v-܂>{ur=/)AJtٺHQBJ'U Dd!TR ZiʤSH&8< 5XqNkΑu{5^&*!l!k"_f77<3f6YfInO7!Y]ڿO(B2k=/t_(XW# )֟ߒwoD͊ᓟp "t ?Ky2O?T}'ǒ#*ۃ7\ _"z3eMTKIƓ#̨J!y3xXt cUƎ-U19BM.#@RrpEH{טkyÍuBSR `6sQ\fH! 8u)Hpy̝ˏ# Ѻq5^*P3B]k;RDi ڦB>f]tyPʋ 6D5&@UC °X^D SŔwI{Ƶr+KVeCi׊9 [iLBu+~NThĎ)%DU=E牪/HA/5Ns#ui Ҋ {=6/& I2* 09'4 pe1&TjMDLd&u +.} H ֪``ZP ` h`?^ߩ=Ȩ $`v"p_ 9͢zu ǪA5*YJ-T{kF8qG$DkL-C DcZX1s% !Fr78)yvnƂ{&JMowJȡoP 嚊cލnTD]F@*QDDH0́9888idYGDgiQ&.!)C>'ĈPs}S'uH }go=9OjW _^Go[:K# j4n:%tq 7_};v3o:Xf0$':;N!auyu8T͸뎶㞚!HK(ܻ\@seuCy0_ 6gʳ%Ms=m2(ێjiiЅ'.er\91#nDq>ޛ?qbG3_plD9WGUy\l~~f2W2)Z_%r5ʐ3;GAOhZ*2i71'Z!l92iF1OBE2QjǑdۼ ##AAGL#UL3*# ~O5"T1I0 9{]lRG$-GG "! ΖQ[5(& R4#$VtAEidl(jz&u]=XEն /{۶_! KC@>$nQ4'iP4$49A%ezZ%9XR(dd/ 9eG_ټ(''X@rf$a"k%i1F4DDq5\|M)Cgu)Z"zA7Z=#VRE!ݏa9׃ol`ÕV)иZ kNc`\vbOx-k{k`N_8~k 4M.oTI$ 9 b켖nj lMBք0 Le0Zl `g sYBe,eBϙ:I |ۖew鳎jL5%A!֚]C-sG麸& M 3u?_i  q\>.{z+/ߐ+ydHv7/b=m)]cڈ|)]8laɬO6F'man<[>L-pwA#-zXDrQ. QQ(bbzu" *MCzԪSh=RȹE55Pƣ nU\nmT~@ɾ#0DG*pF1-aqD w5y#f:,JX-@ڳ[{r#9`Q TH`n;cݽn_LvrLʝKƉ]ik7JF~h.˨o !Ȣ3 F;fl1xm1V}Ve+ݥVTZqRנs]WR)wꇨ5qԒ[9ڊZ `5۪9(رg f]41ቻX΂y'^E;7{{;rl ^'SE- M9X] 9 LevNnS%\e~\]YNI=N}8~7}ʾHFW`¨u}?> W?O_^|vﳳ7ߜ?{umsoߞٿI1{~O||s󟓇w|0s6Y~{\'^&x\$nw׳ {MTfn(|Vͦ2xtk^?O&"m44lsM{!P܆4l/1Pv\7!Y0Dsaiݲ"aNN7SKS(X#u)9[t|{d2o^Qzpe:_Ұ{'GOe~|?~ Y?;$*ƎMm r#d|[\Y8_ĿTZ7U2-Z's=7CL'oߞE*>Y^|ټ%g/merslI;?ڻGιuǙ.&q{c+4887/T8z}XϘ5A7#h6 &z:ӷcVUHS]:;i͡I.N`4@[Jz)Th]4[pU3aƚY KHRhmAvZ$RnS51R<dkxD\{6!1bkrB떻8:T#\ָ_6LR7mo.ي Xpa*5f"ɌΙ.0F*"Sm )[Vtxdu@lSPcLԶRnDu7FEJ Ct{ݎZhs&0&Tt,v5 EWYE0!έE%(oZ9% ìFUW չ"aWbͻ0a#,* Jn>rd @ + .73Z.>%ygM6 uYZ1DڞQCEw1(A,j!tzķ~PF]ż̀l~ZTc]TΪ @BHDdtcDw`v TDH0vftj)gZ i{SN|Sv%hg2@2z;5R %xEpsU$Nǻ`{*}Z,^=$;_CC1O/?bgB\3$|X '| izu5-4#.)!1%{moˋD\WXfbG܌7ӣ" RMu{Ⴏnq]f`$ xW4J>l Is:h>HaN%VA!׼q#"Y1R@յ,;U ՘oCڕ]($n^IhffM͐&lJ`)F2JDY=eaX(e:?MŠrK#o7qE-%OtOtQt>KjTHXa,0K%|̉EjbġQ睛Oָg-~Yoh&dW 60>߂n8/A>:nXB##FP0(C1! C+ʾWBtSJA`#Pgjg q| lz J}IK嘹F}a3?;ycd!(NσlZbM pE2F9g1!h9 ; xLYLTSJXp$26)(2Hk2Lg b%ڔGI5VAR \c4F3gSDh-)[ e&*x㑞VH%;yKC )9<0wh 4,w頔"F%! /vi,<(1$\ʃ[}r,(Svb "$P) ,(Z.eA0 ݤu f[-VH*^']>-<rO.ކ#fh%a/hevǣ~猼5Wt*ө  6X E2Y|@="h.Jz}"9!hcЃ,@?phv͘WU ˶D"ŵ\UPL=5(ii_ d")E+ ?w@TlC(J).Rd9X1NjӗNB\DeGz@Y3+i ~ߐ8oDN#MM \v5 Fm3!BFvYHihvmn8 qqШ0k9;2{Yo>(''Tb-Us*BPu*$T+ʂPDV+rLJ`ǜMU6ǧAz/Qx`YE(ZmAx(Q WbiNaPEJpȑ!aXVbf0N$rwl1I3֥8f"!5VcXylkD V(5Kp0%* QD B888yϩ`qDt0F/6jT N߄X[ Fx8^ISh$,E*Q UtG MYB LQ@ꠀrXlA_np}S; FJc=`^8J'Eq ߱;<&GY^a~X S",eb6#W5P!++n\G7lZ8Kѿ7veFEDm΂ZJ15?dhBoIӇ&M?: 4?pPn׫6os͏r} Pŋ@ 2Ջ@$j.;w%9}+u@2Z,0b•q6xYr5x ,svlH%]PlI吧}lu;p !쁵 y痂PXw7PmD{qvtD߮ o^aiF6kAƣ燯ݾubqiZ.n}\ï~ G`p:~:eR$qQ\A;~wLC&"$7@+m%('6̎CAP3LA⺄]ynlE-(VϰO~FMk!H|FYgo}kI-X{e! 
j'Hu{ͩ.EsPr¸LA1M:tfF)DLięhQNϵyS'5E3ͩւpuB|J9QH>z,R*YXp9V?wUF#x B $#@"׉ dx JCrڌ'Վ2m5yJZq@BS_%'I15N2SF:FxYB(1:fqTx,\3y+E*yffwqp%3"ą"?qԺ}֖_;\g1ȪFB/zDZ@`WslFM/v>a0"e/v2cI_jbuZd/;3FEP~ׁJkVWx!_]1H@^-Ob i.TQ !b+QU+3߫j/gj}p/U+uÈF=gײ5SZW|.<ݻ&eۤ`Uz JM9)kHKkUЙ?+r8y-w R+L:Äq*[ >Q?yb-IeVeB8I%:ǂ hSmf1C4X$pƏ{"C\+6(9.5L-{S -NC1m>8Hݔ/i辶>#kYyuoG-dS+V^; zs@@V2V17e=?4+1s,FGTb#x89 ԞC?>Oݹ;wYD:1kI) K3 FPM1KUB3dY q+ǫ;ѭ/z5r Ո!VΝ)).:'/EQqo/Θ/o`̩WעP,й:z瓕sC v23U/ϼc3+:o햜ݒzwC|CF[Sj\=#N-9~w?a D[Oy-9@%XRUVg pߪj C ljupjp۟ܺ:#B٣;/H5\1} i"5€{xB焸Z{c~vAuq &y@^}M 1"qw<@OEk,^4ڊx%{<$m&|Z^hyǀ!WXvYSoy=|);)V3Fb{7~tn:PR^`tInE}/?q zg{5mYwraSs9R|jIs ]WZTb0Q̼S MgX.2b`` lʕXȘ(lWn`d`H\m { | {S#\} kt-(ۺw?8fWkvYߟZ =#rCLX5(no1,]MJSBny4 hrFf:lzȌ_I8 [<{Kl[gNu;Ce=eJ>zy7 ;7<6??șo?mӣt١ojqS}5KQߑwo~\hF'@pfKQ_X(U]ޯ >S &>=+[0Eƫs~<2$Gƚ1iwr Nh<:B{:0vAԘ*x5b  |*5F8:410Y4R3K5,agUvD*{5TqjTet&\m,_F˸X<ot0#^()S2Y&7mcCJB_?nFkP"WntaWoU1]Bwr{myoHsM>7ۖ2F{mQj~hȟ\Et<Z7Hek- }Gv'JH-Q_ֆU4D(C?:n!XP |T'uNL+QD-#UyƏ[2OeL2aTUCEe"⩠ڐfR>ͼm0)B-3׬)}Fg^н.C(`A nzQSs%m)T%_GoA߻#}r}W&axKkN~wnrVhUf(&Ĕő);ݱw3;I7`aV{0:vۼ0TI٣pBF-¯^p}btX Vg_۳ $n&Y8!)naPWYY~B,Pij+My9ƬM!M|$"LP xX䟫2ND%ÈRW L21V#6GV*YNJp&Uka_;Pw$¨+.Ԛ wrBk+{sCy&ɪ^$./!:Mܕ3=RBb{z59( ݵx><~~}][Ʌԡ0~Kʿm>> |1|]lN˺h &HF(2LF*)djO= W1ϰec hOѻDB?~Kg^"xmг?y4ȩÉ^d|p{ )}֣f4_3iDqK(J9%yΌ ,.fڢ,7‰D$aJ` 6k:߿@w/҂v/Ԛu'|1j/>ԕyq֕3ej ;`K^>@Jr;${ pܗ@HAq!moUֵţ; Ę/~VZQx"/$^_?j럾.ѭ~؄{t7fLFRdeʄdg:2pj (·D/ B SIӣ{t'rr!!!5_VW ܬV7>o8I'Yg[,.<_'rlL"_Eʏ/hD|9y Za73AHE~Opߟy4֤$$.ltH #\~αИHVWJsd2e 3&K%pdl6I8Vj.]tlX%g!,JphcÑ)<#ͦ,^j,H,82LK42&1G1eD?gDT G}fW=/t^T):\JvU[}dXP,Xy `ItNM*Zל`Abݮ]ՌlԷ{G]IJXHAF8JTJ6͔μ^ҋyn0Ճlxٳ"EHkknF,/ғlΞcWIf"Q>"l6~C, 13JL9vIbk4F7k+lEB>x?ώ,Hn ȇ!uΆۓ~-'[_V/siOBJZvV&l14E6҇D$d,}\ Ys.zDTh=X_߬ x,mPvjy`O<кDrr'qPՏ> 燹8sq8qu..[]};ۂK%,YQA~yqDb*p[+p8%'XIq.Cl"k-k?z[jNHK=~̚SYs1:fMGU!>jG &16A  CiUhFp!d}`X%>f!8Ar:9 %g7A'jW-G@hs>8:蜏yUBrN2Z' 04͹ 0Ox@̛d,Ya1jWnJJH٭I sZ[ S+Wԛ`r@\#Ė8G Ij2 ~IZ Pp8,w0!X9xb);jb 5`OnB9Yh꼞T*SsŅ9n3ʂ)| NcF@rp/- J/90c#0^Fcp1Қ@q-f2w/uSsz);'mߐ7><0+xۄ܀5bn>3!W P*ww`,bAO;f6fRs7#%fonB |{ axck4h$PٕrpUHF֠y} ʶ`A &8[f}i cw^MJ=P~zcuB)9n C&XՆY l~ǰ%w Xpc,%: Jk `'Jg) )R+xb1w88 #,K3dv{׬l [α߁fA&}򋣕`}; ?\WfbS|)K83#;*;Uxs,|LW4+j?L>p/ҥu_JT"h zIq$SWgk{ϐv$W/r.Yk퀻@Y{Υkh ܽ}lYz.^ x\✸)@9῏N]уt.1X4ω\Ó*j&^w?9%FW4(敒8ͯy:jD l,3UyǃFOiAq2Yzڵ$+Zj Z x}O0x>tgh:!tt#v:޺; {?O?¯~Ȃb Q]6ѪuP}y|bi\- :qqM̰gw'Pk"+d[:6Eyw7٥tϔƚ:gqݫ%E*[-unõH&G.Z{h)+d95ϫs P>[;>MɈ\40?c0(8MVJc!1FRM9UЀ<+4y]k+WEWnz"kbUŧ1_ _jB*Z8Jl.`Mtez;_F<ԅ/m( ]X,_ ꏆE@Kc@rJ6VrLr Ta%TP͞BB!h7IvL+#! [b2fBjW!L\k{oA "h9(w}ĥ#D0*ݑ2H@(If,+W# lOVs1[dZ #F$e #ηڣ#.%hޥ@jT F$cjs>Dy##C3*uy$QVi4"B"x<΀U)KA?sD<fT%euw&U{fyŬL *DI I>tx.E?ο) !g~-A ۈgBx.c_?mIԒ?m`n\"\ӟK Ki [B"fCE4C0$8eXGaQqr1Hwnr-Bδ[@S[E4G䡶[v7HBeb$:逸ݎEQw-wڭ y"zLxKMm9j30,ifӒ9XOf~]9/㖒O E5Q롄+PӝM3 |rr\x:*phX4V.G58"O<~jozҜ&8&\ŦE~^lJ {Ĝ<楧R-[ gp6#׿n=]+yq%}(h~n_sX)o4&~cUz?7H qW~xaNfӛ%Nw 2n&dQ1(9|۔fxnĸ=Q|Y 3 :vE3/fQTy9t vR)-5 ]U_J]93PTX$ȪD9#I8Fvzuvl AtB-QhTW57>6|~U{ 8u5;q8%JrN{kp.CO)2І $C! p;6QʩJʛO5;a8o=EԢzKk>#nס榀 -,ׯ}mni6lp%)ݗ4C;=ƜSV!(]YWAH?gKag@Z %Ƅn8^u#)-0?VyJB,R<Ģq\0CJvvx=.E.Sҿh# Α6EA&Pc%2.xP$h%gI 5Y}Gc s`j!A PKbM.NCa*B))QU4Pb@{oz)_vw)wo) T@A1l&-x$"3!jdBYqWaz)CM# +=S72iVGfO=-B>QWݸB(R:ZSJyOY"_kHj5>{t9X?qktxn=ñ"c#m^$E!gkSB~a@4ay7}~wuKW~` wnDƕ˜ntzF˘׷jTwB MT /. 
G&wG ^'Q™Fo$>)7X*54T!X%Eo /) iAOny|Ir uF4J=G'0KZUk)r4s9M$R)ݖԗ&FYͯgø6krYn&:.~6l` C0;:}E)R:hEjQ$T[]C5~+(4!t!NX\<ܚh D(r ^k'uvN.<_çdIK 4ԟ/&]"qK*ZM'ZV0$dG9z%O5+?>KGEd !1?~>G!B MR'5h7705/}k{@`hƋ=$7}Ǟ_|N|vy"Bꏱl8jPB_j^s"Y/rO>m?HztKۭ S7GXBsr*`PGn^cVg$TaޗkP`(mgC"IK?>)1v3J`%I?r)~?mZr6*17R `FũO,wD:4e4XqXOþ8;8;Nvh\̠xӊmG"õ+|4QBˑN)Ăy2)}I%r,ֲ~r_t9g{PVn˷ۼ|`W^dq0q/?XP9{L.jN>vd:R g%e!Rr[lmje ’@;l` uSuKfSjّDNL5NP1eS& "3/(+A.42 aЬDz_Pt풸*@5"R3.Ôv3#\"|!j&4J !6Rr@9 *H9Mi찴‚8Ғ` ,&ÑdWay 3߈A[K& LV-VvS10HXsCCLf%!G F* JęRy2LT! ODHE gH{0D4B`娝rF^]Ē0,r?=sK6EiV˺-?cc<3ǢxM,`(Gt/Sz-;'dfe_ͧ "<ƀ߿n6_ѻkp/.S<, ,Tsmfo&cwn}w?󱃻\hheW,EH*͑I@ǚE}KL|eIwc:̄ùP Mّ= n=ԦA{A! u; 5枩N{mH|*ʐ.98VqNFipZW@Cd7+ e-~*EpD݌'@ [Of1'ʐ{D+ q1ux1,淽nܻ'on0}JjuiƟ{; rn&=#gVo1 )L8 r[d?o%Y==Śu`C04\1"Qp% d85 %DX/\\W?vYa'RD7:}px'X!\A`)^38pAJLI(D i{In(Fi.mzЏmp 2aeb#X`;$3eBPǰQhL& ALIMxKQȥ _uk1ʒ_#C(T)`dGaN2Ip'PIՖR%K՘݃rb %4Iu<4 DRBH `qkqk \SI(uc4I=OKb(B81Vf'3t\°Ny853>G;0[zged'N;tG-~`blm-xN:[A@jA&i3ReKq f#`\ /hZeV{ՂM r=6 }}*0PjzN͚RQQfsvTSBHw=Bk= 6+ 6%xm F4Z;ok(aHuhu`aqZL{yM[1{Ϗ^5 y`]dA(@%}L {@*n:{;?Jk$*:aeaFk /-wsm: zZͻO{ BM/9hR;٩Le٩Q 9OD|T kIFtZ"X?;"\gA-9 fk-_ʹqZDk.]Z%z6)3Pc:8?iVG37A6).:pm2rkDZ{n NAuՄ˹;b1OqtJR9zտwβ?Ťlr qf>Γsmy&aq1]Kκ7>|lӳ^OF{皈pTV=^ڳ?驪UpT}&` tTn1?moקw+x󍯀w>$XXp봟Ew"*fRZ+p VsK!$Q=ˇ 5AũbZ>A'}M c!uI{>VYY ]C9BN9m =G9Wm:UAT DgPY"߮,??RLvw!^Z>uKXK=5 Qpw7Ǜro;g`1M݀Wᨢ*~jHz HM#^U*֗3*5k4߹'Sj{4'^\TфIKa$ENZ 3ǜMQDA(ƑX$"g5Vx$]ݪSpK!\T`$Q<UTImN=jپ;@"{MqwӐĝ"n"4RhD%\2pIvf5Nw\Á["&!K`r w`񿧺mFHq^ Mu ۗ/^mGHՊ,vF+<Qȫ5p&4`+a]DSg!1,9ԮV &DZCwYouհ6z PD)GsaY/p0uʕsDvCY"uؤ8B5]viOή|a(שC4E>/9+ƚd&"W43id*WR,c̩c'vo;5gO˼K`> +Pt禘l"h YJ4̀w($? q=la1H>g啝}2huZ}sy~u\vHc))gcˁkv}xۓ\RuKzTj 'V\ s {M5dM5ؙ=JWľO[kfKt!:7ig6x fmƋ l6sm zL|2|u~P~Bv *>f 򷫭_=yɻݡ?=+U{qYLѢ;[P7hrÝF=rZ^K\ ˖[ۭl.uC/YqwCPL_F"xAu= ĦwSO=whm_ST3U x @:Uw % -1n $}hum -@0&wHFDJvE2V55}Dr`āv/ G똫TdM!fD7m\p34W5r/~7!ִyKiz%ip^8e' #1qz腿z1t]Ltr|0E+f^NmS4o|bQ&S7wטR-ˢ:gBqQ[%-T`oSdjp} F6%TI 1IrZy@SIS.IlxQ .lkxjaJjL"ie("!ӊqb,U՜RfKPqQV,ō;)^"w8;DrL)r\"/4G.Œ]gX/e^W$"ftlYΞw\0g]r"4(Ā zvyk-l $ Q]72B[W3sDh{AWڸ5j!~ 1LSzX*OUAY= |Us Dlyn;vz=S(%},˓HP}]5/Y}vX*ɰXm:m,h!]Z9GBR/@hz YR٧HiDq.Tʰ'FDH/%q"x8$q)jq"#EqƲ&(%llvX]@ t cinH[ PR2X%5hƑj˕P2vZKdD+,AiN@+$SŸ#΍keFAɚ^ՕNsf!|;/S}2z޹*X&<^' ൛gon0,{v ;_ŁG` ~+Ǒ.fnX5ell:gYkij"̷Gluj,&}|8FGľ.2`t`{ؤ o%)a [}z}/@&/FՏ4=LDDp7#Ï/zA2%v1Fw= h0/;%\< ,`L(H귃b!F)oc30R o8q\Wʌ/qE5ډ8iFOy#0|k$V>'ĻQy䘄S" g32u$@UDHib.$⪛pPxDmJ:uid]0m6SA{GlV. *3X)%`ڐX:x g OX1x|C0Y,c%ł܁ -4 ETh' `RjPH4ߦxL%g+L{b$IAQ.GOE (=]cJJhCVgXw2%ɳhrX[BF c!dpz[^ڽ\eTeƚӒ:aG_?yy{)yߐyWĬsf0ݎzMc,LݙLH#_C$d0+qdw)-F&X3'PSN@gD,A 7D"s-۪w˵qQU ┤}D_WP}hsiD\WN iw3>ZS}gu~`+Ovd|dՇQ{Ӂfb8 أo Kڜqկ(LC3g.8fk/1]8U[d2\0qfKњ ERR%˄I5vWBjY" ETU{4*qn\ } @IepQDŽˈ[ƸO4D2 'S jrhV9F˝sA (d2a#HZ$9pSb};dfF,ߖ&]霕2׎gv]ꃲ %pwTyZk:@&nd0ERR|QzLFBͽ<ŴP_el?Vs0ع`-޺fH$x+s0o^%=̞WYK]lT9g6a54zUfo xB3J ar繨*G#.*zgQ_B´xQ:Jߔ@x7VzTy}mndǒni-7" P䘎A$ML; f 2uc' >/@oQ YLJh'. 
Xe"; i"hCBM+OwЗ@ں\\_2;(4JVǑf޵qdٿBK&;C^ .ḃ@GčDic,-d7YͪnL KݷιuV}).U8AҩD*mT8R!FRɻ &rۚwm4{ư/_$K7O_4SB$bG/0[F㑤خ.O4boWFH۠]*h 3.͚VךAiײHg/uyV>}7̶~ 0 7,sR̆Mt3vU?<(J"wG`ˮNgN\]/ft<~'S*7N\".?*.\ %L6~뒧9oG~T;c z܇'O\Թ9a; ~Lv~h~q;~T3ī Vi=_^p!nNd<3Br 4:}Jc/,"MK:B\QruqQ_CUmk d5o=|G6^BX5j1T®R_Q?;[eťY!!WH6;Qo{ԩbYp "&_s}P7 ڜ/2#[d6\:CWz DlgrBMjmAy H'_*[]^mB֫u͞ AW5~Sf(dit]]ݺ~Wj('\4Ω% sdg`D I$P gYZ0A1AZeHJaQA2牂GdUt#)}}h20}K L_'A?t`R6#~E;q٧SOl,~\ڛ帟s[cd 'C^}ޣ;}pui%QP^ZLG*{bd<ڄ}c)VҎ+ t/F$HjHuX?ok-ۀTJnÇ+ܡRkC}(0$)7t i {yMwéM Dv~e(3r=+<5m5YrOlx.xߝY%!}d"j6pSZrxWw }{ -FXeKҎ9*H8c+ /NIkgȳkysu.5< :a KpBWܼ:LpV{u"H֪ExHI@$ˑ월'ɚD?C'/TvxԚPN|X2hSQp9C=2ӌ BaCJOɱ+KL;=|*F+pFf ,C|M!aAVTp0ʢ%m,sݲD_U'nT&x|DD3Ίe{xB9r9iYpUEujq4uS&gnqU>5OM>&V 9N$T> m,mс 1U%ݴ1d@""1pB \C\~cX/Ԅ3yv 縍4,-g,-5EGk erJ 3T~1"{5u`/ B_pVDܨ\e*Xh%(^Wg\(t~s2oVϖHxso[nmp0)߽L6F̩L>Xw=4x;<ט+9y<*yvzj\8Lb_y'lmz{ޣ5uGm!x%B{N@<3nqۭߺ^ŭn@XةtwԺuթ՝Q1%Z':;'4iXM|PȰB8x-Vuz~ SԡK] MyK(uyȽv uG||wN1r6LRm,PSV i:y.U&@;:s*\q:t E^=$On<۠)Sd5SxnSPya) M[rq+/CD\9Nda˾0 t)O yG!$Rkn2\P` ##ŭuC"u/ífa*9g%a52)֌"R1JF4h-0PedRdlyH~fh [.UtIi)'%Jc.k9Z9XʰV{gXezK\Fqf6f67h7C&ƚkbo&v}pD$A3ʰUT ϊ$!TSeLPi !ɵ,hQA!Ww)Lb<ɷz5C 6:f4_͗L顟8~~`Gm#K}Y[@ ҵz,#(\~kկ&@_ı:чkTW3GB. <.O OfK5{u}`9+[1W`_+l x!*3vJ|AlϭX LOh7c70}էLf:d08s[g(e 6X%ʅToHP)(cYT*IaͷPv{L8)E.qt3i n/&xvgVYUuٵ`eɱ0NRY qsam0\b K*Ǧ0Fos@TKb9Ay,\/ ˌ$ aQ$09IYYc)RQi.^JQJS D)()9)3 PEPT (Ir +n! lW[<\ jd6qd~q ٯVfZ x/<+Bjk/8VTlQWY*Iy8ͨm{M 3EێFpkN`*Uol;_ihk:{j) T!GO Tڬ nuFu{p R5R=hlcor Jkl6;|*~wɃ&w6޵6r" r/͞Iv%uhIl?,IZRk\V0Ȍv5d, U,ql%Œt((zPӗ`z, 1QQWKK;߫U!G5Q1xnz N޾\hj.Q"#P{N}mkNZ9C8Tמ]Oy1Rm;T9CK#(ؠ! R"\L\3+{#j*Ӹp_gdsr/eM|Yy}ql HޯmٗP?ZFˋԒdĹE\R9bA# uqdv1{Eb58\ôZ߰lPM]K)dj WH*w>_a[9CQ6{0.UݒV,YPv+;WE0I4YWGvND\Uf{v-s^=Aȃf9̊f5,+A)\hǏ\W:;j”X2>HoArcE3d4ɘE'M9l&Z }kypTs`9,ük^jNfKQfnm,QxRO#lyl$dT:둕/k7l˝rID;0R}_Ո.eg<fжg#}QG0e>[bQ]9c(mmټ AAHKV qhLٿF4>*]RѶ=xu(I$aNYpD'@:E2TM`$4H,uj 1aev]u(0W[ee)1Dm&mbRHl!3yc9llqk g!c>Xd y F*$5]ä*]6cCϥ_^fFL_b„lC>bJ+6 1ωpᜌRɾ 4X#Y&IXR(\HgZ,\ X5J{&Wl<d΃ .DCq!&MhP!EE1iLbmvx-Va&h*pQje1BZ 1T`<k V!2PaMV=ӱɞs%62`FED,9J58gBUR㈒R% *cm.0jPDݓc.7ৄb\@Z h5A;dKKJWa4QENww\3Df5y69D^+ls^cyDmTكeϮWb5jÇWˈ/cV"u{RHF]=ݹ0h=D5zHr?":(˒G;d /JF DIՔhB%&Ym~j8ոB_D=i*sh]}ld[ -gE.$WmԻ i ;z.mזѓ}g[8@t9{B}+.X-(Ak"c" z^FȜwެ3PJ 9$4V<0~M NY=2GGuRu*n؅TF ,%!˃\R-Rj{ƠdN&l8*#GB30L3:!<}A-! XYuW~A,Ai3g/[dQ6Jo"}3l=™'d.YvJR8a"*&\=%W#CIJI %(ãNl;>qSTc_|fJ',Ixnӧ I;^^brRR/uct QIqPrSÏz;JN#7_q.Bq^WP d*wB  T wlHvoĨƱn=9_XA w%cK҃|{ 5e 3}kc\NE(+QU{x]gUY۪B59Ux0k$>E2I<^nq8gSEe[v& EGa9\I. eM(8CǛG1uy0+-1R$1}BQK\{뒠H3a^+A7PS- n7].JW.%1LGobu>^?:p0ϵgH{{Y"A7X2޿dTN5Y~«9kۑ$0YbGܠdQ9>͈RPhx%$zQDtU`}.%'Sj)c*q!;KHə( 6 -}}n^xD}__<}t֘&H3 ;o&5J/ .zL(UFikj[ڞh1W9lI^5\}+"_\Ɨs|:~̌/|.O؛؛)b#>S( eksE14w^`maB9DReua*:Wc+lvlzGu_t5a)C ֋Gpno # /m0/?Z! bjsc=!TVe8Du4wHtѸc M;RNjR:q]P};11v0qc4 ar="T!FaS< r!|넁kg)f<s3Z G<"q` ;ɬLLF8rs~ 1 lV \X?JT:׹AmRD湒q[#F=-Dt\MA a=1UNpRI  \uljS;z <pd0>`9[Rh \0Teb Z6=.Pl* [%IM_ϣD B~CtU?5I=(2ë/"3G?.ჯbŇa~uy!frƘ+M j&t]<hk{{W\(|?5iADD5Or <@wE@Gt}| ᬘ F o拵<{Na}lgOw|n2;k}xwGh[kam~{/XR2֚+E ̷>lPř  Zڏ,r]D=K]bQ̨;#P3MuUե K4Ĕ/{4aDKFGl)ҪLQǒA9Q1"v$3J>˻<|޽Zōn6&ri~jz,%G$^3?9RŠ M2fϴ2ӰťHb9 )gv2_~ϖJ0ijgG-lfa, 1pľrWa*'$DHX4LW }Fٙk59WX dfcb>2M&#e-.O.itE_>xylVjX[%G,+dר dan5Q7CY Mp P~&>ⵥgJBO4CS6=1 zF4s%BLAX{&'NQQO>|n7vh9qT%F2'%ygha2n<17H(K1"U %B\G52+#9JcsK!ʍVz> +fG #@3ƃאTK6G_LhŪeSu:n~6 gt~t=W[?N~] ?ALVD}'bP;V ]ݽ/?;_ fՃLy}Fd?Sv_U.MF1wV' ./M{}ǶҞREAx! 
y*ZK'ٳn.ph Eu꾣u/:2YMnMhW::0|Ĺod2N֭.RT;X>wJjڶn'jݚА7Eq.UO4Xci1r_j#\1>YL|kIl.;dR"Zc9xx{t 3xo!?>iC qgwJ׹w ,IK:,u[3Q!/̐^Cc$*C+#0.uTU W}=uϾ%lGWJD۠4DUq %fe&˩cat?{w\GѻT,D|r}q)]N'r˨rIVrɫɫbDP H 3Cq3Ԣ^Rp q_#ΔD#=Q\*̈́"v})48QSgNAa +&eRɠܨk QMbk( kGQVL!}fe(vd P6ijdvt` 8¦8GfQkٻ6n$WXr WCvRwq/Rf"*CCqH i1eqiZMքpM)vKMFQ9gQ]Tn6ej\CRft\q bt_{@HAJ Il˺|ܒ5;Ԭ&l4.ې~V :/uK%K!ګl 1й4~5]V\D[>׃ťNƕ67/Š}Y:"*f*n[5yMcƷ& a"fMԄss4b~ԁի/xd_-^*Z_-}utt2vjC3 k,4gƃ 6G D( ȚɡQlL1g;s˹@1b= 2g@r-y)g{jii*9MA*/E IjT`JI~a,h#fni! ,"&r*fx -rs3D+ntdcg9 Hx)>hk0'99r?Yՠ;&1`Cum}uGzhPŏ71ǝzfpQL-gӃq9{x_~~5"(ȻO_(TDi-~槙EpV^ Cys^tЙ1xuUWGPM[}{p{',\q&`l)iO !в8""'d_ر$2#B+ڣB`?otneRHVXnz.oGnPW\_ln bo'wN?W~qS;XL , `m~7< l=Ƣe(.h yay|qv}g?pܽ&r kԫތgcI>2֟(BO`47fyb,eI:dUJ|wZ}ѽhQX ὐ=GMC,c˘p4\3ENElly!_l`Hb`ӚZJѐPU.h}[`CC$d wn+\pi +Bư%rfDpD0=M0%F`Hn2[&ax#2qiHTj xTuj{bWʁ(C*C2KILKePjK9Z]CEri\(mi0 grdsoT0T5bXѠ())BErFӘ}1IU/URe0!7Td H,XigQvlCus`s (x4)hMFsT4rwSҘP .$uIFYQYEȱuf &ytlr-[O>Rx?s pa8xƔ;arB h?GOqЩ{%B%(9ŏ<[wq5H.b] l-<I}i[ d}S)%72Ԅ^dA KƦ!8P8T%FZQ{b—8j-gRUĸ&x%\]dE39[3 ˘$*3[nS|:-u@V*\"5%C*6&K0y#ɶ"Ss}m4VIW s]Sm[}ΑdOn6JpaN2G/jܲ$V5tJwf|p.ZHW~H3G0n30 ~? Ql昴xiZ,G-M2i݀i+6`N[(/G;vLv'do `oNuW_>a&ýws^ +敽9n8n󵋷ǏFw_I>?tҷf~k"8{k/U\4MƥEOS$:)÷'m?ڽۖk9(w`mI9yJczJmT k 5挜 ]n%om`m7y a),c-e6`NzY`x\JVoW_U|57?~𳁙NGnE *Jq}S>/)uDk!P V]?qJIc0u!w5d HDzokS/*0 npn&cOa 2uo7yR83Q,lQ᧷eUw%z'~Z>Y4RRa\ -%ɣ͛\Qs* +LJKYާꚳ'6tmHC^&锒'(ڲnE}ZD jN;n-tu떜(uk@C^];|h Ӈm$Tm%bܔNC]RT;9”cDkmDzȗ%/] =(PXT`R(dzYU(R,){ΘV"!qɄslR"1P?WT)+GYm*4> P2%:m ҏx h8"(l9pAFPj%Ȍ$cF!3F:ZɦA-&RF g[ciAG|lZ41x\R"ej6B@ٱH&G:Sy/S]~0 *B"(2[*Q9.<4ïAAkOӢpVɾQP#/&D|DG]}YxĎAW~X8v;'%grJ|fGf??ޚ͘@c8`V0$&? fq/ߢ;/WO?mA{eǬH8(()n#Fνr,ؽd}𝻲]3ʉ$b( "CέGPȀ;ZĪB ͢五>v J%νsݽ6kzpiLg o:ͫP)^ 4YӇܡΚ>\KCqVji/Iu$,#0_B ӣb-) efnFu\R_q6s@Qk,Q04jƤS䲤Ɋ!E`Gӯ rq{EwWɢh_?/:\|OD" oPZ *K?_[UyweeԒy!<;sΩ$CHRF]b\:lzL).܉ VI2\[T*|Cf+"y9cSG9ġV:|@_vMt3ѴV KAock-< @ɇqڎRmrǺmk,[t[ Z~}swr%$CQ ) JzoXS.U#2W-V>a4OW 8)>tVŔ+]:OV )wR0cm8DzCiʁM9~ޖtچ%%I>hXW\Q'ƂNu$)J V%ZrߣqН <եN ObBWFZ[ !gt_H'aCR3΅=DŽH3/{khmIG|r i U(KbN.ʊbD 1ZpF(ڳRǽқ!s;CZ /y]иєad06=$z f]Y-Qq2Fސ, dU {c*$gr);A33\}I, $]\XzLm )8.Oy^_=8*O;QJi)$߆sn<+O;B.qQHfH„kz`)v[{#S0Q5&=P{>}еd C1_jhɈlzE>{FYT\aUiLK= pB)l2Sl/{& AVʞJkӢRJp y\w /t{eԦ&e!S}kq<'G$Rc XVtRr30DO9@$U88-W(Zrb)9Z뛍5_D 46q޸ ̳Jy9~,~AFZQk-N]AKaYN] ]*`5kPK3wUuETwcPj?x2aR{wPEFJv"IOg,25G 6O7nmк3BhqcXuN Ț{ZQiuN4,'Ky?xGBP3Ν-̡/w<(*Gc{yWA1GΒ*-zeڤԢ^$[^H<HI-/kXR%.SR>>M ) vr7 \2!+7ER`f7 aH\BPSy@v\0CCK#gxܦ#s̈ъR ӔL1JHnuJ)Ht8A[ej_?-]5M) {ɨOdD;~9W'?1Y8 G2@UIuaq>w Spns,{Z]_BhQ TQm?OU邵)藹͗ PI"jㆃnow 3oV߹57*߹|(`U !x˻x;y˜ !8VݔITNv#RO^Z姍 X "KթEDlf)%~y1C3CG t-(@neis V~uV cZq?c; P Lڔ %',[ X1ReR?0NG ڥ Sf2<~Xe @@Or 됵MU|!uKx*Pٛggn}}%n_Kqռ 1o~64k?bU0~n? "wa2TQ=o~8Vݔr~2k~ʏuClhU`5%7ڲnm͞]v]ZKS7E{Zq==><_dss;{3הxگ o4f6N{ T2wxu`5&4J*!͒SϠ*ѹI #;PES$H1U # )Ũ䨘iaQfTRkW9FZuJ* rr Lbt$2(QըF.D&?r\6nE&%yT烳ҌIm<:;F hbMUQݙ( W\vwR:PQhh"%jaʽ~Vq Bt ,BX!9-mNĂʙ֘R VXF@G)Jk2j()W1i۠YsW(wҿ,_b1;qkתrzQLOrj9B :X5Re\h'r;_Y/ˋl]fL#"ጲ$%/|?rN)鎌BWADpYWap3F 9 }PpH_%K)ZzP:źcF[1u68bCS[^ݶŠދ EM;m>/btu*èDHTt8":P#%bB'i*DJ6ps=dx;-Dt-1B.NNpdk+ (v،zyk5,uL@p5pa乆Q? A \kL 7YN2)$Z iFs!aܺ4ϐ\s1VH5l4[76֥HO" ]! 
var/home/core/zuul-output/logs/kubelet.log0000644000000000000000005537060415134142745017705 0ustar rootroot
Jan 21 10:55:07 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 21 10:55:07 crc restorecon[4695]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan
21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 21 10:55:07 crc 
restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc 
restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 21 10:55:07 
crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]:
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
[Jan 21 10:55:07 crc restorecon[4695] logged the identical message — "not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16" — for each of the following 259 entries in /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/:]
    9846683b.0  252252d2.0  1e8e7201.0  ISRG_Root_X1.pem  DigiCert_TLS_RSA4096_Root_G5.pem  d52c538d.0
    c44cc0c0.0  GlobalSign_Root_R46.pem  DigiCert_Trusted_Root_G4.pem  75d1b2ed.0  a2c66da8.0  GTS_Root_R2.pem
    ecccd8db.0  Entrust.net_Certification_Authority__2048_.pem  aee5f10d.0  3e7271e8.0  b0e59380.0  4c3982f2.0
    Entrust_Root_Certification_Authority.pem  6b99d060.0  bf64f35b.0  0a775a30.0  002c0b4f.0  cc450945.0
    Entrust_Root_Certification_Authority_-_EC1.pem  106f3e4d.0  b3fb433b.0  GlobalSign.pem  4042bcee.0  Entrust_Root_Certification_Authority_-_G2.pem
    02265526.0  455f1b52.0  0d69c7e1.0  9f727ac7.0  Entrust_Root_Certification_Authority_-_G4.pem  5e98733a.0
    f0cd152c.0  dc4d6a89.0  6187b673.0  FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem  ba8887ce.0  068570d1.0
    f081611a.0  48a195d8.0  GDCA_TrustAUTH_R5_ROOT.pem  0f6fa695.0  ab59055e.0  b92fd57f.0
    GLOBALTRUST_2020.pem  fa5da96b.0  1ec40989.0  7719f463.0  GTS_Root_R1.pem  1001acf7.0
    f013ecaf.0  626dceaf.0  c559d742.0  1d3472b9.0  9479c8c3.0  a81e292b.0
    4bfab552.0  Go_Daddy_Class_2_Certification_Authority.pem  Sectigo_Public_Server_Authentication_Root_E46.pem  Go_Daddy_Root_Certificate_Authority_-_G2.pem  e071171e.0  57bcb2da.0
    HARICA_TLS_ECC_Root_CA_2021.pem  ab5346f4.0  5046c355.0  HARICA_TLS_RSA_Root_CA_2021.pem  865fbdf9.0  da0cfd1d.0
    85cde254.0  Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem  cbb3f32b.0  SecureSign_RootCA11.pem  Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem  5860aaa6.0
    31188b5e.0  HiPKI_Root_CA_-_G1.pem  c7f1359b.0  5f15c80c.0  Hongkong_Post_Root_CA_3.pem  09789157.0
    ISRG_Root_X2.pem  18856ac4.0  1e09d511.0  IdenTrust_Commercial_Root_CA_1.pem  cf701eeb.0  d06393bb.0
    IdenTrust_Public_Sector_Root_CA_1.pem  10531352.0  Izenpe.com.pem  SecureTrust_CA.pem  b0ed035a.0  Microsec_e-Szigno_Root_CA_2009.pem
    8160b96c.0  e8651083.0  2c63f966.0  Security_Communication_RootCA2.pem  Microsoft_ECC_Root_Certificate_Authority_2017.pem  8d89cda1.0
    01419da9.0  SSL.com_TLS_RSA_Root_CA_2022.pem  b7a5b843.0  Microsoft_RSA_Root_Certificate_Authority_2017.pem  bf53fb88.0  9591a472.0
    3afde786.0  SwissSign_Gold_CA_-_G2.pem  NAVER_Global_Root_Certification_Authority.pem  3fb36b73.0  d39b0a2c.0  a89d74c2.0
    cd58d51e.0  b7db1890.0  NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem  988a38cb.0  60afe812.0  f39fc864.0
    5443e9e3.0  OISTE_WISeKey_Global_Root_GB_CA.pem  e73d606e.0  dfc0fe80.0  b66938e9.0  1e1eab7c.0
    OISTE_WISeKey_Global_Root_GC_CA.pem  773e07ad.0  3c899c73.0  d59297b8.0  ddcda989.0  QuoVadis_Root_CA_1_G3.pem
    749e9e03.0  52b525c7.0  Security_Communication_RootCA3.pem  QuoVadis_Root_CA_2.pem  d7e8dc79.0  7a819ef2.0
    08063a00.0  6b483515.0  QuoVadis_Root_CA_2_G3.pem  064e0aa9.0  1f58a078.0  6f7454b3.0
    7fa05551.0  QuoVadis_Root_CA_3.pem  76faf6c0.0  9339512a.0  f387163d.0  ee37c333.0
    QuoVadis_Root_CA_3_G3.pem  e18bfb83.0  e442e424.0  fe8a2cd8.0  23f4c490.0  5cd81ad7.0
    SSL.com_EV_Root_Certification_Authority_ECC.pem  f0c70a8d.0  7892ad52.0  SZAFIR_ROOT_CA2.pem  4f316efb.0  SSL.com_EV_Root_Certification_Authority_RSA_R2.pem
    06dc52d5.0  583d0756.0  Sectigo_Public_Server_Authentication_Root_R46.pem  SSL.com_Root_Certification_Authority_ECC.pem  0bf05006.0  88950faa.0
    9046744a.0  3c860d51.0  SSL.com_Root_Certification_Authority_RSA.pem  6fa5da56.0  33ee480d.0  Secure_Global_CA.pem
    63a2c897.0  SSL.com_TLS_ECC_Root_CA_2022.pem  bdacca6f.0  ff34af3f.0  dbff3a01.0  Security_Communication_ECC_RootCA1.pem
    emSign_Root_CA_-_C1.pem  Starfield_Class_2_Certification_Authority.pem  406c9bb1.0  Starfield_Root_Certificate_Authority_-_G2.pem  emSign_ECC_Root_CA_-_C3.pem  Starfield_Services_Root_Certificate_Authority_-_G2.pem
    SwissSign_Silver_CA_-_G2.pem  99e1b953.0  T-TeleSec_GlobalRoot_Class_2.pem  vTrus_Root_CA.pem  T-TeleSec_GlobalRoot_Class_3.pem  14bc7599.0
    TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem  TWCA_Global_Root_CA.pem  7a3adc42.0  TWCA_Root_Certification_Authority.pem  f459871d.0  Telekom_Security_TLS_ECC_Root_2020.pem
    emSign_Root_CA_-_G1.pem  Telekom_Security_TLS_RSA_Root_2023.pem  TeliaSonera_Root_CA_v1.pem  Telia_Root_CA_v2.pem  8f103249.0  f058632f.0
    ca-certificates.crt  TrustAsia_Global_Root_CA_G3.pem  9bf03295.0  98aaf404.0  ca-bundle.crt  TrustAsia_Global_Root_CA_G4.pem
    1cef98f5.0  073bfcc5.0  2923b3f9.0  Trustwave_Global_Certification_Authority.pem  f249de83.0  edcbddb5.0
    emSign_ECC_Root_CA_-_G3.pem  Trustwave_Global_ECC_P256_Certification_Authority.pem  9b5697b0.0  1ae85e5e.0  b74d2bd5.0  Trustwave_Global_ECC_P384_Certification_Authority.pem
    d887a5bb.0  9aef356c.0  TunTrust_Root_CA.pem  fd64f3fc.0  e13665f9.0  UCA_Extended_Validation_Root.pem
    0f5dc4f3.0  da7377f6.0  UCA_Global_G2_Root.pem  c01eb047.0  304d27c3.0  ed858448.0
    USERTrust_ECC_Certification_Authority.pem  f30dd6ad.0  04f60c28.0  vTrus_ECC_Root_CA.pem  USERTrust_RSA_Certification_Authority.pem  fc5a8f99.0
    35105088.0  ee532fd5.0  XRamp_Global_Certification_Authority.pem  706f604c.0  76579174.0  certSIGN_ROOT_CA.pem
    8d86cdd1.0  882de061.0  certSIGN_ROOT_CA_G2.pem  5f618aec.0  a9d40e02.0  e-Szigno_Root_CA_2017.pem
    e868b802.0  83e9984f.0  ePKI_Root_Certification_Authority.pem  ca6e4ad9.0  9d6523ce.0  4b718d9b.0
    869fbf79.0
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 10:55:07 crc restorecon[4695]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 10:55:07 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to
system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc 
restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 21 10:55:08 crc restorecon[4695]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
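[Editor's note, not part of the captured log] Every restorecon record above pairs a kubelet path with an SELinux context of the form user:role:type:level; the trailing MCS level (e.g. s0:c7,c13) appears to differ per pod, which is consistent with category-based isolation of container files, though that interpretation is ours, not the log's. A minimal Go sketch for splitting such a label into its four parts (the parse function and Context struct are our own illustrative names):

// selinux_ctx.go - split an SELinux context label like the ones in the
// restorecon lines above into user, role, type, and MCS level.
package main

import (
	"fmt"
	"strings"
)

// Context holds the four fields of a label such as
// system_u:object_r:container_file_t:s0:c7,c13.
type Context struct {
	User, Role, Type, Level string
}

func parse(label string) (Context, error) {
	// SplitN with n=4: the MCS level itself contains ':' (s0:c7,c13),
	// so only the first three separators delimit fields.
	parts := strings.SplitN(label, ":", 4)
	if len(parts) != 4 {
		return Context{}, fmt.Errorf("unexpected context %q", label)
	}
	return Context{parts[0], parts[1], parts[2], parts[3]}, nil
}

func main() {
	c, err := parse("system_u:object_r:container_file_t:s0:c7,c13")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {User:system_u Role:object_r Type:container_file_t Level:s0:c7,c13}
}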
Jan 21 10:55:08 crc kubenswrapper[4925]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 21 10:55:08 crc kubenswrapper[4925]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 21 10:55:08 crc kubenswrapper[4925]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 21 10:55:08 crc kubenswrapper[4925]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 21 10:55:08 crc kubenswrapper[4925]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 21 10:55:08 crc kubenswrapper[4925]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.972193 4925 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
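[Editor's note, not part of the captured log] The deprecation warnings above say these flags should move into the file passed via --config (here /etc/kubernetes/kubelet.conf, per the FLAG dump below). A sketch of the config-file equivalents, rendered as JSON (the kubelet accepts JSON or YAML config files); the field names follow the kubelet.config.k8s.io/v1beta1 schema as we recall it, and the volumePluginDir and systemReserved values are placeholders, so verify both against the linked docs for your kubelet version:

// kubelet_config_sketch.go - emit a KubeletConfiguration fragment covering
// the deprecated flags warned about in the log above.
package main

import (
	"encoding/json"
	"fmt"
)

type taint struct {
	Key    string `json:"key"`
	Effect string `json:"effect"`
}

type kubeletConfig struct {
	Kind                     string            `json:"kind"`
	APIVersion               string            `json:"apiVersion"`
	ContainerRuntimeEndpoint string            `json:"containerRuntimeEndpoint"` // was --container-runtime-endpoint
	VolumePluginDir          string            `json:"volumePluginDir"`          // was --volume-plugin-dir
	RegisterWithTaints       []taint           `json:"registerWithTaints"`       // was --register-with-taints
	SystemReserved           map[string]string `json:"systemReserved"`           // was --system-reserved
}

func main() {
	cfg := kubeletConfig{
		Kind:                     "KubeletConfiguration",
		APIVersion:               "kubelet.config.k8s.io/v1beta1",
		ContainerRuntimeEndpoint: "/var/run/crio/crio.sock", // value taken from the FLAG dump below
		VolumePluginDir:          "/etc/kubernetes/kubelet-plugins/volume/exec", // placeholder path
		RegisterWithTaints:       []taint{{Key: "node-role.kubernetes.io/master", Effect: "NoSchedule"}}, // from --register-with-taints below
		SystemReserved:           map[string]string{"cpu": "500m", "memory": "1Gi"}, // placeholder values
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}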
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.986954 4925 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987028 4925 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987036 4925 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987043 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987048 4925 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987055 4925 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987059 4925 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987063 4925 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987068 4925 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987073 4925 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987078 4925 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987083 4925 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987088 4925 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987093 4925 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987098 4925 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987103 4925 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987108 4925 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987114 4925 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987118 4925 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987122 4925 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987126 4925 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987131 4925 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987172 4925 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987177 4925 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987183 4925 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987188 4925 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987192 4925 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987197 4925 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987203 4925 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987207 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987211 4925 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987215 4925 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987220 4925 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987223 4925 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987227 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987231 4925 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987237 4925 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987241 4925 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987245 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987250 4925 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987253 4925 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987258 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987262 4925 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987266 4925 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987270 4925 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987275 4925 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987281 4925 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987288 4925 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987293 4925 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987297 4925 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987301 4925 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987305 4925 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987333 4925 feature_gate.go:330] unrecognized feature gate: Example
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987338 4925 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987342 4925 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987346 4925 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987350 4925 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987353 4925 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987357 4925 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987361 4925 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987364 4925 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987368 4925 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987372 4925 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987376 4925 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987379 4925 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987383 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987414 4925 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987419 4925 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987424 4925 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987427 4925 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.987432 4925 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
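[Editor's note, not part of the captured log] The gate names above (NewOLM, GatewayAPI, ManagedBootImages, ...) look like OpenShift cluster-level feature gates being passed to a kubelet whose upstream gate registry does not know them, hence the warnings; that reading is an inference, not stated in the log. The FLAG dump that follows records the effective value of every kubelet flag in "FLAG: --name=\"value\"" form. A small Go sketch (regexes written against this log's format only, fed the log on stdin) that collects both:

// kubelet_log_scan.go - extract unrecognized feature gates and FLAG
// name/value pairs from kubelet startup output like the lines around this.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var (
	gateRe = regexp.MustCompile(`unrecognized feature gate: (\S+)`)
	flagRe = regexp.MustCompile(`FLAG: (--[\w-]+)="(.*)"`)
)

func main() {
	var gates []string
	flags := map[string]string{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // journal lines can be very long
	for sc.Scan() {
		line := sc.Text()
		if m := gateRe.FindStringSubmatch(line); m != nil {
			gates = append(gates, m[1])
		}
		if m := flagRe.FindStringSubmatch(line); m != nil {
			flags[m[1]] = m[2]
		}
	}
	fmt.Printf("%d unrecognized feature gates, first few: %v\n", len(gates), gates[:min(3, len(gates))])
	fmt.Printf("--node-ip=%s\n", flags["--node-ip"]) // e.g. 192.168.126.11 in the dump below
}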
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.987994 4925 flags.go:64] FLAG: --address="0.0.0.0"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988013 4925 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988022 4925 flags.go:64] FLAG: --anonymous-auth="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988029 4925 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988036 4925 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988040 4925 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988048 4925 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988055 4925 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988061 4925 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988066 4925 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988071 4925 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988078 4925 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988083 4925 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988088 4925 flags.go:64] FLAG: --cgroup-root=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988092 4925 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988096 4925 flags.go:64] FLAG: --client-ca-file=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988101 4925 flags.go:64] FLAG: --cloud-config=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988105 4925 flags.go:64] FLAG: --cloud-provider=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988110 4925 flags.go:64] FLAG: --cluster-dns="[]"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988117 4925 flags.go:64] FLAG: --cluster-domain=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988122 4925 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988126 4925 flags.go:64] FLAG: --config-dir=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988131 4925 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988136 4925 flags.go:64] FLAG: --container-log-max-files="5"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988142 4925 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988147 4925 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988151 4925 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988156 4925 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988161 4925 flags.go:64] FLAG: --contention-profiling="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988165 4925 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988170 4925 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988174 4925 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988179 4925 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988185 4925 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988189 4925 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988193 4925 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988201 4925 flags.go:64] FLAG: --enable-load-reader="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988206 4925 flags.go:64] FLAG: --enable-server="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988211 4925 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988217 4925 flags.go:64] FLAG: --event-burst="100"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988223 4925 flags.go:64] FLAG: --event-qps="50"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988227 4925 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988232 4925 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988237 4925 flags.go:64] FLAG: --eviction-hard=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988244 4925 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988249 4925 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988254 4925 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988260 4925 flags.go:64] FLAG: --eviction-soft=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988265 4925 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988270 4925 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988275 4925 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988280 4925 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988284 4925 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988288 4925 flags.go:64] FLAG: --fail-swap-on="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988293 4925 flags.go:64] FLAG: --feature-gates=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988299 4925 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988303 4925 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988308 4925 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988312 4925 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988320 4925 flags.go:64] FLAG: --healthz-port="10248"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988324 4925 flags.go:64] FLAG: --help="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988328 4925 flags.go:64] FLAG: --hostname-override=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988333 4925 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988337 4925 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988342 4925 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988347 4925 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988351 4925 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988355 4925 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988360 4925 flags.go:64] FLAG: --image-service-endpoint=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988365 4925 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988369 4925 flags.go:64] FLAG: --kube-api-burst="100"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988374 4925 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988380 4925 flags.go:64] FLAG: --kube-api-qps="50"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988385 4925 flags.go:64] FLAG: --kube-reserved=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988414 4925 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988419 4925 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988424 4925 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988429 4925 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988434 4925 flags.go:64] FLAG: --lock-file=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988438 4925 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988443 4925 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988447 4925 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988475 4925 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988486 4925 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988493 4925 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988497 4925 flags.go:64] FLAG: --logging-format="text"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988502 4925 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988509 4925 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988514 4925 flags.go:64] FLAG: --manifest-url=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988519 4925 flags.go:64] FLAG: --manifest-url-header=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988525 4925 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988530 4925 flags.go:64] FLAG: --max-open-files="1000000"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988536 4925 flags.go:64] FLAG: --max-pods="110"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988541 4925 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988545 4925 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988550 4925 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988554 4925 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988559 4925 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988563 4925 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988568 4925 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988582 4925 flags.go:64] FLAG: --node-status-max-images="50"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988587 4925 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988591 4925 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988595 4925 flags.go:64] FLAG: --pod-cidr=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988601 4925 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988608 4925 flags.go:64] FLAG: --pod-manifest-path=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988613 4925 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988618 4925 flags.go:64] FLAG: --pods-per-core="0"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988622 4925 flags.go:64] FLAG: --port="10250"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988627 4925 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988631 4925 flags.go:64] FLAG: --provider-id=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988635 4925 flags.go:64] FLAG: --qos-reserved=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988640 4925 flags.go:64] FLAG: --read-only-port="10255"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988644 4925 flags.go:64] FLAG: --register-node="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988649 4925 flags.go:64] FLAG: --register-schedulable="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988653 4925 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988662 4925 flags.go:64] FLAG: --registry-burst="10"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988666 4925 flags.go:64] FLAG: --registry-qps="5"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988671 4925 flags.go:64] FLAG: --reserved-cpus=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988675 4925 flags.go:64] FLAG: --reserved-memory=""
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988682 4925 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988686 4925 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988691 4925 flags.go:64] FLAG: --rotate-certificates="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988696 4925 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988701 4925 flags.go:64] FLAG: --runonce="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988705 4925 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988710 4925 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988715 4925 flags.go:64] FLAG: --seccomp-default="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988719 4925 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988724 4925 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988728 4925 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988733 4925 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988738 4925 flags.go:64] FLAG: --storage-driver-password="root"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988742 4925 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988746 4925 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988751 4925 flags.go:64] FLAG: --storage-driver-user="root"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988755 4925 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988760 4925 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988765
4925 flags.go:64] FLAG: --system-cgroups="" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988769 4925 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988776 4925 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988781 4925 flags.go:64] FLAG: --tls-cert-file="" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988786 4925 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988793 4925 flags.go:64] FLAG: --tls-min-version="" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988797 4925 flags.go:64] FLAG: --tls-private-key-file="" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988801 4925 flags.go:64] FLAG: --topology-manager-policy="none" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988806 4925 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988811 4925 flags.go:64] FLAG: --topology-manager-scope="container" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988816 4925 flags.go:64] FLAG: --v="2" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988823 4925 flags.go:64] FLAG: --version="false" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988834 4925 flags.go:64] FLAG: --vmodule="" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988840 4925 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.988846 4925 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.988968 4925 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.988976 4925 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.988982 4925 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.988987 4925 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.988992 4925 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.988997 4925 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989002 4925 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989008 4925 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
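(The FLAG dump above is produced by walking every registered command-line flag at startup, including flags left at their defaults. A minimal sketch of that pattern, using Go's standard flag package rather than the pflag-based wrapper the kubelet actually uses; the two flags registered here are illustrative only:)

// flagdump.go - illustrative sketch: print every registered flag in the
// same FLAG: --name="value" form seen in the log above.
package main

import (
	"flag"
	"fmt"
)

func main() {
	v := flag.Int("v", 2, "log verbosity")
	maxPods := flag.Int("max-pods", 110, "maximum number of pods")
	flag.Parse()
	_, _ = v, maxPods

	// VisitAll walks all registered flags, not just the ones the user set,
	// which is why the kubelet log shows the full flag set with defaults.
	flag.VisitAll(func(f *flag.Flag) {
		fmt.Printf("FLAG: --%s=%q\n", f.Name, f.Value.String())
	})
}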
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989013 4925 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989018 4925 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989023 4925 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989027 4925 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989032 4925 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989037 4925 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989043 4925 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989048 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989054 4925 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989059 4925 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989065 4925 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989070 4925 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989075 4925 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989080 4925 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989085 4925 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989089 4925 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989095 4925 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989102 4925 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989107 4925 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989112 4925 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989117 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989122 4925 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989127 4925 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989131 4925 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989137 4925 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989143 4925 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989148 4925 feature_gate.go:330] unrecognized feature gate: Example
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989153 4925 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989158 4925 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989163 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989170 4925 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989175 4925 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989180 4925 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989185 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989189 4925 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989194 4925 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989200 4925 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989206 4925 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989212 4925 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989218 4925 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989231 4925 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989236 4925 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989242 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989247 4925 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989252 4925 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989256 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989261 4925 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989269 4925 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989274 4925 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989278 4925 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989283 4925 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989287 4925 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989291 4925 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989296 4925 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989301 4925 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989305 4925 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989309 4925 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989313 4925 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989320 4925 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989326 4925 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989331 4925 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989336 4925 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 21 10:55:08 crc kubenswrapper[4925]: W0121 10:55:08.989341 4925 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 21 10:55:08 crc kubenswrapper[4925]: I0121 10:55:08.989363 4925 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.106349 4925 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.106471 4925 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106569 4925 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106584 4925 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106590 4925 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106596 4925 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106601 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106609 4925 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106614 4925 feature_gate.go:330] unrecognized feature gate: Example
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106620 4925 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106624 4925 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106630 4925 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106640 4925 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106646 4925 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106650 4925 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106655 4925 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106659 4925 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106664 4925 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106668 4925 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106673 4925 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106678 4925 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106683 4925 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106687 4925 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106692 4925 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106697 4925 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106701 4925 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106706 4925 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106710 4925 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106715 4925 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106722 4925 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106727 4925 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106733 4925 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106740 4925 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106746 4925 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106751 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106756 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106770 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106778 4925 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106784 4925 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106790 4925 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106795 4925 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106800 4925 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106805 4925 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106809 4925 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106814 4925 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106819 4925 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106823 4925 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106827 4925 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106832 4925 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106836 4925 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106841 4925 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106846 4925 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106850 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106855 4925 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106860 4925 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106864 4925 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106869 4925 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106873 4925 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106878 4925 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106882 4925 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106887 4925 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106893 4925 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106898 4925 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106902 4925 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106908 4925 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106913 4925 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106918 4925 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106924 4925 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106929 4925 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106934 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106940 4925 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106946 4925 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.106953 4925 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.106964 4925 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107154 4925 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107161 4925 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107165 4925 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107169 4925 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107173 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107177 4925 feature_gate.go:330] unrecognized feature gate: Example
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107181 4925 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107186 4925 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107190 4925 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107195 4925 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107199 4925 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107204 4925 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107208 4925 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107213 4925 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107217 4925 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107222 4925 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107226 4925 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107231 4925 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107235 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107240 4925 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107245 4925 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107250 4925 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107258 4925 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107263 4925 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107267 4925 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107271 4925 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107275 4925 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107279 4925 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107284 4925 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107290 4925 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107294 4925 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107299 4925 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107303 4925 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107307 4925 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107312 4925 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107317 4925 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107322 4925 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107326 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107331 4925 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107336 4925 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107341 4925 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107346 4925 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107351 4925 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107355 4925 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107359 4925 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107363 4925 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107367 4925 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107370 4925 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107375 4925 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107379 4925 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107385 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107390 4925 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107417 4925 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107424 4925 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107430 4925 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107435 4925 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107439 4925 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107444 4925 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107449 4925 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107454 4925 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107458 4925 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107463 4925 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107468 4925 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107472 4925 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107476 4925 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107480 4925 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107485 4925 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107490 4925 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107494 4925 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107498 4925 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.107502 4925 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.107510 4925 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.107782 4925 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.111604 4925 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.111755 4925 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
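(Each "unrecognized feature gate" warning above comes from checking a requested gate name against the set of gates this component knows about before applying its boolean value; the "feature gates: {map[...]}" lines print the resulting effective map. A simplified sketch of that parsing behaviour, under those assumptions; the real logic lives in k8s.io/component-base/featuregate, and the known-gate set below is a hypothetical subset:)

// featuregates.go - simplified, illustrative parser for "Name=bool" gate
// specs, mirroring the warn-and-skip behaviour seen in the log above.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// known is a hypothetical subset of the gates the component recognizes.
var known = map[string]bool{
	"CloudDualStackNodeIPs": true,
	"KMSv1":                 true,
}

func parseGates(spec string) map[string]bool {
	gates := map[string]bool{}
	for _, kv := range strings.Split(spec, ",") {
		name, val, ok := strings.Cut(kv, "=")
		if !ok {
			continue
		}
		if !known[name] {
			// Unknown names are warned about and skipped, not fatal.
			fmt.Printf("W: unrecognized feature gate: %s\n", name)
			continue
		}
		b, err := strconv.ParseBool(val)
		if err != nil {
			continue
		}
		gates[name] = b
	}
	return gates
}

func main() {
	fmt.Println(parseGates("CloudDualStackNodeIPs=true,NewOLM=true,KMSv1=true"))
}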
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.112358 4925 server.go:997] "Starting client certificate rotation"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.112409 4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.112682 4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-06 12:18:48.858586668 +0000 UTC
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.112864 4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.140012 4925 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.141739 4925 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.142425 4925 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.165872 4925 log.go:25] "Validated CRI v1 runtime API"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.259960 4925 log.go:25] "Validated CRI v1 image API"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.263161 4925 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.266011 4925 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-21-10-50-51-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.266041 4925 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.284713 4925 manager.go:217] Machine: {Timestamp:2026-01-21 10:55:09.283337941 +0000 UTC m=+0.887229895 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:57887b03-108e-4b07-83a9-2cba1ffe7256 BootID:bb3c8461-270f-4cd5-aa85-780d3a9e3ead Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:42:8d:1b Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:42:8d:1b Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:0e:f1:50 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:e6:d5:87 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:18:b8:51 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:a2:bf:8d Speed:-1 Mtu:1496} {Name:eth10 MacAddress:0e:c5:f5:42:f8:2c Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:c6:24:3f:1c:0f:61 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.285003 4925 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.285216 4925 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.286242 4925 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.286486 4925 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.286537 4925 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.286779 4925 topology_manager.go:138] "Creating topology manager with none policy"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.286799 4925 container_manager_linux.go:303] "Creating device plugin manager"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.287024 4925 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.287057 4925 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.287367 4925 state_mem.go:36] "Initialized new in-memory state store"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.287484 4925 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.391918 4925 kubelet.go:418] "Attempting to sync node with API server"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.391978 4925 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.392028 4925 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.392045 4925 kubelet.go:324] "Adding apiserver pod source"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.392059 4925 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.393764 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused
Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.393819 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused
Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.393905 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError"
Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.393944 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.394327 4925 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.394882 4925 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
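(The "Certificate expiration is ... rotation deadline is ..." timestamps in the rotation entries above are read straight from the on-disk certificate pairs the kubelet loads. A small stand-alone diagnostic sketch, not part of the kubelet, that prints the same NotAfter field from the server certificate path named above:)

// certinfo.go - illustrative: decode the PEM pair file and print the
// certificate's subject and expiry, the value the rotation log reports.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	data, err := os.ReadFile("/var/lib/kubelet/pki/kubelet-server-current.pem")
	if err != nil {
		log.Fatal(err)
	}
	// The pair file holds both the certificate and the private key; skip
	// every PEM block that is not a certificate.
	for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("subject=%s notAfter=%s\n", cert.Subject, cert.NotAfter)
	}
}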
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.395871 4925 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396438 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396466 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396476 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396485 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396503 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396515 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396561 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396576 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396588 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396603 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396617 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.396628 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.399100 4925 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.399704 4925 server.go:1280] "Started kubelet"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.400081 4925 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.400179 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.400325 4925 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.406174 4925 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 21 10:55:09 crc systemd[1]: Started Kubernetes Kubelet.
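(Every "dial tcp 38.102.83.113:6443: connect: connection refused" error in the entries above and below means the API server endpoint was not yet accepting TCP connections while the kubelet was starting. A quick stand-alone probe of that endpoint; the host name is taken from the log and the timeout is an arbitrary assumption:)

// dialcheck.go - illustrative: reproduce the failing connection from the
// reflector/CSINode/event errors to see whether the apiserver is up yet.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "api-int.crc.testing:6443", 2*time.Second)
	if err != nil {
		// Matches the log's failure mode while the control plane is down.
		fmt.Println("apiserver not reachable:", err)
		return
	}
	conn.Close()
	fmt.Println("apiserver TCP endpoint is accepting connections")
}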
Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.410986 4925 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.113:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188cb9adb6f3a00a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 10:55:09.399662602 +0000 UTC m=+1.003554536,LastTimestamp:2026-01-21 10:55:09.399662602 +0000 UTC m=+1.003554536,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.411984 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.412029 4925 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.412084 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 17:26:27.396109633 +0000 UTC Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.412298 4925 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.412348 4925 volume_manager.go:287] "The desired_state_of_world populator starts" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.412380 4925 volume_manager.go:289] "Starting Kubelet Volume Manager" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.412509 4925 server.go:460] "Adding debug handlers to kubelet server" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.412525 4925 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.412647 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="200ms" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.416135 4925 factory.go:55] Registering systemd factory Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.416170 4925 factory.go:221] Registration of the systemd container factory successfully Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.416729 4925 factory.go:153] Registering CRI-O factory Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.416953 4925 factory.go:221] Registration of the crio container factory successfully Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.417096 4925 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.417147 4925 factory.go:103] Registering Raw factory Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.417178 4925 manager.go:1196] Started watching for new ooms in manager Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.417954 4925 
manager.go:319] Starting recovery of all containers Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.419835 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.419911 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427682 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427792 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427810 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427824 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427839 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427849 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427861 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427872 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427888 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.427903 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428032 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428052 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428069 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428088 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428101 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428112 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428128 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428141 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428154 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428166 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428178 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428191 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428205 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428219 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428298 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428313 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428337 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428351 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428445 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428462 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428476 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428490 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428508 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428521 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428534 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428548 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428561 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428576 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428588 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428602 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428615 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428628 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428643 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428656 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428669 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428707 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428721 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428733 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428746 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428758 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428773 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428786 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428806 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428822 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428840 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428855 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428868 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428882 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428896 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428909 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428923 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428942 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428956 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.428969 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.429005 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.429019 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.429845 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.429878 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.429892 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.430489 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432090 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432121 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432148 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432169 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432199 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" 
volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432214 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432227 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432240 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432256 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432271 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432322 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432337 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432355 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432369 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432385 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432419 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432436 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432454 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432473 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432487 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432502 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432517 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432531 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432545 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432564 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432581 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432601 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" 
volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432620 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432636 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432653 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432669 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432684 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432731 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432749 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432772 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432794 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432810 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432827 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432846 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432864 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432881 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432896 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432914 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.432931 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.434574 4925 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.434624 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.434641 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.434654 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438722 4925 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438757 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438773 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438790 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438804 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438819 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438835 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438852 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438865 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438879 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438895 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438914 4925 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438927 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438941 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438954 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438967 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438983 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.438996 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.439008 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.439020 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.439032 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.439043 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.439055 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.439070 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.439083 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443034 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443067 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443116 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443133 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443149 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443190 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443204 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443227 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443259 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.443275 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446685 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446754 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446791 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446815 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446833 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446858 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446878 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446904 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446922 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446939 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446962 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.446984 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447013 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447032 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447052 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447079 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447099 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447128 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447152 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447174 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447204 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447225 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447254 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447274 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447297 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447322 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447341 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447366 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447264 4925 manager.go:324] Recovery completed Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447734 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447871 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447891 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447940 4925 reconstruct.go:130] "Volume 
is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447959 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.447974 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448003 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448031 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448051 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448068 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448083 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448098 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448111 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448137 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448152 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448167 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448183 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448198 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448214 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448228 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448244 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448266 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448289 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448314 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448334 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448351 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448381 4925 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448509 4925 reconstruct.go:97] "Volume reconstruction finished" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.448525 4925 reconciler.go:26] "Reconciler: start to sync state" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.463509 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.465511 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.465562 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.465575 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.466485 4925 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.466503 4925 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.466528 4925 state_mem.go:36] "Initialized new in-memory state store" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.492874 4925 policy_none.go:49] "None policy: Start" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.495535 4925 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.496085 4925 state_mem.go:35] "Initializing new in-memory state store" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.497131 4925 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.500166 4925 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.500282 4925 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.500365 4925 kubelet.go:2335] "Starting kubelet main sync loop" Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.500485 4925 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 21 10:55:09 crc kubenswrapper[4925]: W0121 10:55:09.501416 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.501647 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.512725 4925 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.557891 4925 manager.go:334] "Starting Device Plugin manager" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.557994 4925 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.558017 4925 server.go:79] "Starting device plugin registration server" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.558724 4925 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.558755 4925 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.559212 4925 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.559336 4925 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.559349 4925 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.600860 4925 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.601665 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.671833 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.671862 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod 
\"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.672040 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.672058 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.672583 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="400ms" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.672659 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.672713 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.672727 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.672971 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.673594 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.673671 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.674086 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.674128 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.674141 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.674265 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.674433 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.674490 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675007 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675055 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675078 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675111 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675357 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675383 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675408 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675432 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675459 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675473 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.675531 4925 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675592 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675605 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675615 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675742 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675883 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.675924 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678041 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678080 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678079 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678148 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678167 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678092 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678474 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678794 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.678846 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.679082 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.679106 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.679115 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.679284 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.679314 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.679767 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.679787 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.679796 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.680341 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.680360 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.680370 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.681227 4925 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773554 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773599 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773665 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773689 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773707 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773723 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773739 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773754 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773769 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773785 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773799 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773814 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773828 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773833 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773867 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773886 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773845 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.773927 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.874895 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.874977 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875008 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875015 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875031 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875054 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875075 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875082 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875102 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875103 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875132 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875149 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875133 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875173 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875179 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875204 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875213 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875231 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875237 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875249 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875266 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875294 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875323 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875380 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.875659 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.877644 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.877687 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.877702 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:09 crc kubenswrapper[4925]: I0121 10:55:09.877737 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc"
register node" node="crc" Jan 21 10:55:09 crc kubenswrapper[4925]: E0121 10:55:09.878162 4925 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.012454 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.028958 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.054163 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.064430 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.071709 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 21 10:55:10 crc kubenswrapper[4925]: E0121 10:55:10.073527 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="800ms" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.581661 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 02:05:42.694924623 +0000 UTC Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.581762 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.581755 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:10 crc kubenswrapper[4925]: W0121 10:55:10.582450 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:10 crc kubenswrapper[4925]: E0121 10:55:10.582543 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.582884 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.582920 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.582931 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 21 10:55:10 crc kubenswrapper[4925]: I0121 10:55:10.582955 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 10:55:10 crc kubenswrapper[4925]: E0121 10:55:10.583221 4925 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Jan 21 10:55:10 crc kubenswrapper[4925]: W0121 10:55:10.594927 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:10 crc kubenswrapper[4925]: E0121 10:55:10.594991 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:10 crc kubenswrapper[4925]: E0121 10:55:10.874783 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="1.6s" Jan 21 10:55:10 crc kubenswrapper[4925]: W0121 10:55:10.942344 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:10 crc kubenswrapper[4925]: E0121 10:55:10.942492 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:11 crc kubenswrapper[4925]: W0121 10:55:11.014102 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:11 crc kubenswrapper[4925]: E0121 10:55:11.014240 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:11 crc kubenswrapper[4925]: I0121 10:55:11.325339 4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 21 10:55:11 crc kubenswrapper[4925]: E0121 10:55:11.326573 4925 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" 
Jan 21 10:55:11 crc kubenswrapper[4925]: I0121 10:55:11.384235 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:11 crc kubenswrapper[4925]: I0121 10:55:11.386448 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:11 crc kubenswrapper[4925]: I0121 10:55:11.386533 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:11 crc kubenswrapper[4925]: I0121 10:55:11.386552 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:11 crc kubenswrapper[4925]: I0121 10:55:11.386593 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 10:55:11 crc kubenswrapper[4925]: E0121 10:55:11.387466 4925 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Jan 21 10:55:11 crc kubenswrapper[4925]: I0121 10:55:11.401446 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:11 crc kubenswrapper[4925]: I0121 10:55:11.581923 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 17:10:46.225091058 +0000 UTC Jan 21 10:55:12 crc kubenswrapper[4925]: I0121 10:55:12.401895 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:12 crc kubenswrapper[4925]: E0121 10:55:12.476238 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="3.2s" Jan 21 10:55:12 crc kubenswrapper[4925]: I0121 10:55:12.582923 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 15:09:11.506574473 +0000 UTC Jan 21 10:55:12 crc kubenswrapper[4925]: W0121 10:55:12.665025 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:12 crc kubenswrapper[4925]: E0121 10:55:12.665117 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:12 crc kubenswrapper[4925]: W0121 10:55:12.863275 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: 
connection refused Jan 21 10:55:12 crc kubenswrapper[4925]: E0121 10:55:12.863352 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:12 crc kubenswrapper[4925]: W0121 10:55:12.958879 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:12 crc kubenswrapper[4925]: E0121 10:55:12.958990 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:12 crc kubenswrapper[4925]: I0121 10:55:12.988582 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:12 crc kubenswrapper[4925]: I0121 10:55:12.990446 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:12 crc kubenswrapper[4925]: I0121 10:55:12.990500 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:12 crc kubenswrapper[4925]: I0121 10:55:12.990523 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:12 crc kubenswrapper[4925]: I0121 10:55:12.990561 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 10:55:12 crc kubenswrapper[4925]: E0121 10:55:12.991194 4925 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Jan 21 10:55:13 crc kubenswrapper[4925]: I0121 10:55:13.401638 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:13 crc kubenswrapper[4925]: I0121 10:55:13.583982 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 09:38:36.999882949 +0000 UTC Jan 21 10:55:13 crc kubenswrapper[4925]: W0121 10:55:13.725340 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:13 crc kubenswrapper[4925]: E0121 10:55:13.725492 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:14 crc 
kubenswrapper[4925]: I0121 10:55:14.402160 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:14 crc kubenswrapper[4925]: I0121 10:55:14.584129 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 05:25:51.172895654 +0000 UTC Jan 21 10:55:14 crc kubenswrapper[4925]: W0121 10:55:14.659849 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-b2fd79498d311d4d76b3e0caa6cefd82f7637111de031f59f8598b3493468b98 WatchSource:0}: Error finding container b2fd79498d311d4d76b3e0caa6cefd82f7637111de031f59f8598b3493468b98: Status 404 returned error can't find the container with id b2fd79498d311d4d76b3e0caa6cefd82f7637111de031f59f8598b3493468b98 Jan 21 10:55:14 crc kubenswrapper[4925]: W0121 10:55:14.665681 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-1ac825c93eb485e8733cca517acf4a3bd9ad25c6d1cf7512b56da88240943e36 WatchSource:0}: Error finding container 1ac825c93eb485e8733cca517acf4a3bd9ad25c6d1cf7512b56da88240943e36: Status 404 returned error can't find the container with id 1ac825c93eb485e8733cca517acf4a3bd9ad25c6d1cf7512b56da88240943e36 Jan 21 10:55:14 crc kubenswrapper[4925]: W0121 10:55:14.666246 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-9ea4d76bd279c9efab4cce9eb3ebf6124a2afa5111022337ee1ee3b341c4b2c2 WatchSource:0}: Error finding container 9ea4d76bd279c9efab4cce9eb3ebf6124a2afa5111022337ee1ee3b341c4b2c2: Status 404 returned error can't find the container with id 9ea4d76bd279c9efab4cce9eb3ebf6124a2afa5111022337ee1ee3b341c4b2c2 Jan 21 10:55:15 crc kubenswrapper[4925]: I0121 10:55:15.401698 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:15 crc kubenswrapper[4925]: I0121 10:55:15.461314 4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 21 10:55:15 crc kubenswrapper[4925]: E0121 10:55:15.462808 4925 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:15 crc kubenswrapper[4925]: I0121 10:55:15.584286 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 17:47:54.86214628 +0000 UTC Jan 21 10:55:15 crc kubenswrapper[4925]: I0121 10:55:15.609245 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1ac825c93eb485e8733cca517acf4a3bd9ad25c6d1cf7512b56da88240943e36"} Jan 21 10:55:15 crc kubenswrapper[4925]: I0121 10:55:15.610036 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b2fd79498d311d4d76b3e0caa6cefd82f7637111de031f59f8598b3493468b98"} Jan 21 10:55:15 crc kubenswrapper[4925]: I0121 10:55:15.610886 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9ea4d76bd279c9efab4cce9eb3ebf6124a2afa5111022337ee1ee3b341c4b2c2"} Jan 21 10:55:15 crc kubenswrapper[4925]: I0121 10:55:15.611657 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"104ed951c91efd08a22c55f853da0b392f62fd7718c53f6ae006f84c7f8ba34c"} Jan 21 10:55:15 crc kubenswrapper[4925]: I0121 10:55:15.612548 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"85453d505c88e4ae0f8762101ecd9be5f2c67d850e72eadaff99ecd352af079b"} Jan 21 10:55:15 crc kubenswrapper[4925]: E0121 10:55:15.678243 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="6.4s" Jan 21 10:55:15 crc kubenswrapper[4925]: E0121 10:55:15.972650 4925 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.113:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188cb9adb6f3a00a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 10:55:09.399662602 +0000 UTC m=+1.003554536,LastTimestamp:2026-01-21 10:55:09.399662602 +0000 UTC m=+1.003554536,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 21 10:55:16 crc kubenswrapper[4925]: I0121 10:55:16.191909 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:16 crc kubenswrapper[4925]: I0121 10:55:16.194191 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:16 crc kubenswrapper[4925]: I0121 10:55:16.194248 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:16 crc kubenswrapper[4925]: I0121 10:55:16.194266 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:16 crc kubenswrapper[4925]: I0121 10:55:16.194299 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 10:55:16 crc kubenswrapper[4925]: E0121 10:55:16.194860 4925 kubelet_node_status.go:99] "Unable to register 
node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Jan 21 10:55:16 crc kubenswrapper[4925]: I0121 10:55:16.401825 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:16 crc kubenswrapper[4925]: I0121 10:55:16.585346 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 20:39:51.447995372 +0000 UTC Jan 21 10:55:17 crc kubenswrapper[4925]: W0121 10:55:17.212238 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:17 crc kubenswrapper[4925]: E0121 10:55:17.212370 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:17 crc kubenswrapper[4925]: I0121 10:55:17.401541 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:17 crc kubenswrapper[4925]: I0121 10:55:17.586248 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 04:00:49.854351962 +0000 UTC Jan 21 10:55:17 crc kubenswrapper[4925]: W0121 10:55:17.721320 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:17 crc kubenswrapper[4925]: E0121 10:55:17.721449 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:18 crc kubenswrapper[4925]: W0121 10:55:18.373733 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:18 crc kubenswrapper[4925]: E0121 10:55:18.373860 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:18 crc kubenswrapper[4925]: I0121 10:55:18.401496 4925 csi_plugin.go:884] Failed to contact API server when waiting 
for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:18 crc kubenswrapper[4925]: W0121 10:55:18.504506 4925 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:18 crc kubenswrapper[4925]: E0121 10:55:18.504612 4925 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Jan 21 10:55:18 crc kubenswrapper[4925]: I0121 10:55:18.593882 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 17:31:22.185633872 +0000 UTC Jan 21 10:55:18 crc kubenswrapper[4925]: I0121 10:55:18.623551 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f"} Jan 21 10:55:18 crc kubenswrapper[4925]: I0121 10:55:18.720224 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7"} Jan 21 10:55:18 crc kubenswrapper[4925]: I0121 10:55:18.725496 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940"} Jan 21 10:55:18 crc kubenswrapper[4925]: I0121 10:55:18.727136 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd"} Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.401892 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.594721 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 07:04:08.748888288 +0000 UTC Jan 21 10:55:19 crc kubenswrapper[4925]: E0121 10:55:19.681880 4925 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.733366 4925 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940" exitCode=0 Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.733474 4925 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940"} Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.733797 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.735466 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980"} Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.737079 4925 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f" exitCode=0 Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.737193 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f"} Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.737230 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.738531 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.738579 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.738590 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.738763 4925 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7" exitCode=0 Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.738811 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7"} Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.738898 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.739859 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.739886 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.739896 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.740922 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.742264 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:19 crc 
kubenswrapper[4925]: I0121 10:55:19.742322 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.742337 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.743215 4925 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89" exitCode=0 Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.743284 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89"} Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.743486 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.771121 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.771166 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.771177 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.771168 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.771363 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:19 crc kubenswrapper[4925]: I0121 10:55:19.771412 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.401987 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.595533 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 14:37:15.738575483 +0000 UTC Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.801204 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79"} Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.801359 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69"} Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.813998 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465"} Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.814063 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9"} Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.814191 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.815514 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.815579 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.818631 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.826523 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05"} Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.826573 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d"} Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.829580 4925 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87" exitCode=0 Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.829660 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87"} Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.829830 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.830839 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.830888 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.830902 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.832583 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"bc5405bf612569a42ed39d11df66003beb1842e098e64e83e32d5937fa244748"} Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.832757 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:20 crc 
kubenswrapper[4925]: I0121 10:55:20.834131 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.834168 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:20 crc kubenswrapper[4925]: I0121 10:55:20.834179 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.401835 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.596001 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 20:18:05.934147777 +0000 UTC Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.840558 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32"} Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.840687 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.841925 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.841999 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.842013 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.843581 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d"} Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.843630 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae"} Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.846231 4925 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe" exitCode=0 Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.846357 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.846354 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe"} Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.846451 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.846599 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847504 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847530 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847546 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847626 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847649 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847661 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847672 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847705 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:21 crc kubenswrapper[4925]: I0121 10:55:21.847723 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.595520 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.596467 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 18:34:41.926915201 +0000 UTC Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.597053 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.597087 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.597097 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.597125 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.853451 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30"} Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.856604 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3"} Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.856769 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.856847 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.858046 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.858114 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:22 crc kubenswrapper[4925]: I0121 10:55:22.858135 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.518358 4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.596989 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 10:19:35.358435278 +0000 UTC Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.862802 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.862802 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.947141 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.947230 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.947253 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.958437 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.959095 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:23 crc kubenswrapper[4925]: I0121 10:55:23.959128 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:24 crc kubenswrapper[4925]: I0121 10:55:24.597950 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 03:35:25.005421667 +0000 UTC Jan 21 10:55:24 crc kubenswrapper[4925]: I0121 10:55:24.869812 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41"} Jan 21 10:55:24 crc kubenswrapper[4925]: I0121 10:55:24.869867 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a"} Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.302578 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:25 crc 
kubenswrapper[4925]: I0121 10:55:25.302789 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.304149 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.304199 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.304223 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.598215 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 20:36:13.723780936 +0000 UTC Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.876971 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59"} Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.877021 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621"} Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.877088 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.877861 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.877894 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.877905 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.905657 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.905875 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.906116 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.907334 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.907378 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:25 crc kubenswrapper[4925]: I0121 10:55:25.907408 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.598673 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 15:13:30.734005839 +0000 UTC Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.622692 4925 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.879745 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.879786 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.880996 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.880996 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.881125 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.881153 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.881079 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:26 crc kubenswrapper[4925]: I0121 10:55:26.881243 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:27 crc kubenswrapper[4925]: I0121 10:55:27.599602 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 07:44:58.409820636 +0000 UTC Jan 21 10:55:27 crc kubenswrapper[4925]: I0121 10:55:27.882878 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:27 crc kubenswrapper[4925]: I0121 10:55:27.884221 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:27 crc kubenswrapper[4925]: I0121 10:55:27.884269 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:27 crc kubenswrapper[4925]: I0121 10:55:27.884280 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:28 crc kubenswrapper[4925]: I0121 10:55:28.600657 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 06:23:55.849709768 +0000 UTC Jan 21 10:55:28 crc kubenswrapper[4925]: I0121 10:55:28.626708 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 21 10:55:28 crc kubenswrapper[4925]: I0121 10:55:28.627329 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:28 crc kubenswrapper[4925]: I0121 10:55:28.629421 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:28 crc kubenswrapper[4925]: I0121 10:55:28.629479 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:28 crc kubenswrapper[4925]: I0121 10:55:28.629494 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:29 crc kubenswrapper[4925]: I0121 10:55:29.398786 4925 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:29 crc kubenswrapper[4925]: I0121 10:55:29.399658 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:29 crc kubenswrapper[4925]: I0121 10:55:29.401485 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:29 crc kubenswrapper[4925]: I0121 10:55:29.401536 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:29 crc kubenswrapper[4925]: I0121 10:55:29.401555 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:29 crc kubenswrapper[4925]: I0121 10:55:29.601270 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 13:36:11.914736584 +0000 UTC Jan 21 10:55:29 crc kubenswrapper[4925]: E0121 10:55:29.682829 4925 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.582934 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.583626 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.585586 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.585683 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.585702 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.601560 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 21:04:12.198979373 +0000 UTC Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.604175 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.612713 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.893505 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.895000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.895051 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.895065 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:30 crc kubenswrapper[4925]: I0121 10:55:30.898225 4925 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:31 crc kubenswrapper[4925]: I0121 10:55:31.601831 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 00:50:23.235957069 +0000 UTC Jan 21 10:55:31 crc kubenswrapper[4925]: I0121 10:55:31.895935 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:31 crc kubenswrapper[4925]: I0121 10:55:31.897000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:31 crc kubenswrapper[4925]: I0121 10:55:31.897029 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:31 crc kubenswrapper[4925]: I0121 10:55:31.897038 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:32 crc kubenswrapper[4925]: E0121 10:55:32.079415 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="7s" Jan 21 10:55:32 crc kubenswrapper[4925]: I0121 10:55:32.402639 4925 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 21 10:55:32 crc kubenswrapper[4925]: I0121 10:55:32.514089 4925 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 21 10:55:32 crc kubenswrapper[4925]: I0121 10:55:32.515070 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 21 10:55:32 crc kubenswrapper[4925]: E0121 10:55:32.598442 4925 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Jan 21 10:55:32 crc kubenswrapper[4925]: I0121 10:55:32.602447 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 18:49:01.564071604 +0000 UTC Jan 21 10:55:32 crc kubenswrapper[4925]: I0121 10:55:32.898356 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:32 crc kubenswrapper[4925]: I0121 10:55:32.899454 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:32 crc kubenswrapper[4925]: I0121 10:55:32.899496 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:32 crc kubenswrapper[4925]: I0121 10:55:32.899515 4925 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:33 crc kubenswrapper[4925]: E0121 10:55:33.522024 4925 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 21 10:55:33 crc kubenswrapper[4925]: I0121 10:55:33.584505 4925 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 21 10:55:33 crc kubenswrapper[4925]: I0121 10:55:33.584615 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 21 10:55:33 crc kubenswrapper[4925]: I0121 10:55:33.602943 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 23:50:20.355863735 +0000 UTC Jan 21 10:55:33 crc kubenswrapper[4925]: I0121 10:55:33.687866 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 21 10:55:33 crc kubenswrapper[4925]: I0121 10:55:33.688111 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:33 crc kubenswrapper[4925]: I0121 10:55:33.689590 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:33 crc kubenswrapper[4925]: I0121 10:55:33.689627 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:33 crc kubenswrapper[4925]: I0121 10:55:33.689641 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:34 crc kubenswrapper[4925]: I0121 10:55:34.604227 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 20:05:01.560610586 +0000 UTC Jan 21 10:55:35 crc kubenswrapper[4925]: I0121 10:55:35.365517 4925 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 21 10:55:35 crc kubenswrapper[4925]: I0121 10:55:35.366002 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 21 10:55:35 crc 
kubenswrapper[4925]: I0121 10:55:35.378926 4925 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 21 10:55:35 crc kubenswrapper[4925]: I0121 10:55:35.378999 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 21 10:55:35 crc kubenswrapper[4925]: I0121 10:55:35.605282 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 21:40:29.128348855 +0000 UTC Jan 21 10:55:36 crc kubenswrapper[4925]: I0121 10:55:36.606310 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 03:20:01.997864168 +0000 UTC Jan 21 10:55:36 crc kubenswrapper[4925]: I0121 10:55:36.655600 4925 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]log ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]etcd ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-api-request-count-filter ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-startkubeinformers ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/generic-apiserver-start-informers ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/priority-and-fairness-config-consumer ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/priority-and-fairness-filter ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-apiextensions-informers ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-apiextensions-controllers ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/crd-informer-synced ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-system-namespaces-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-cluster-authentication-info-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-legacy-token-tracking-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-service-ip-repair-controllers ok Jan 21 10:55:36 crc kubenswrapper[4925]: 
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/priority-and-fairness-config-producer ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/bootstrap-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/start-kube-aggregator-informers ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/apiservice-status-local-available-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/apiservice-status-remote-available-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/apiservice-registration-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/apiservice-wait-for-first-sync ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/apiservice-discovery-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/kube-apiserver-autoregistration ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]autoregister-completion ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/apiservice-openapi-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: [+]poststarthook/apiservice-openapiv3-controller ok Jan 21 10:55:36 crc kubenswrapper[4925]: livez check failed Jan 21 10:55:36 crc kubenswrapper[4925]: I0121 10:55:36.655681 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:55:37 crc kubenswrapper[4925]: I0121 10:55:37.606962 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 13:42:01.029771213 +0000 UTC Jan 21 10:55:38 crc kubenswrapper[4925]: I0121 10:55:38.607502 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 00:49:25.778687267 +0000 UTC Jan 21 10:55:39 crc kubenswrapper[4925]: I0121 10:55:39.598563 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:39 crc kubenswrapper[4925]: I0121 10:55:39.599865 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:39 crc kubenswrapper[4925]: I0121 10:55:39.599917 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:39 crc kubenswrapper[4925]: I0121 10:55:39.599935 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:39 crc kubenswrapper[4925]: I0121 10:55:39.599969 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 10:55:39 crc kubenswrapper[4925]: E0121 10:55:39.603492 4925 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 21 10:55:39 crc kubenswrapper[4925]: I0121 10:55:39.608531 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 23:23:20.100909243 +0000 
Jan 21 10:55:39 crc kubenswrapper[4925]: E0121 10:55:39.683456 4925 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.368997 4925 trace.go:236] Trace[400295796]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Jan-2026 10:55:26.387) (total time: 13981ms):
Jan 21 10:55:40 crc kubenswrapper[4925]: Trace[400295796]: ---"Objects listed" error: 13981ms (10:55:40.368)
Jan 21 10:55:40 crc kubenswrapper[4925]: Trace[400295796]: [13.981850298s] [13.981850298s] END
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.369039 4925 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.369551 4925 trace.go:236] Trace[70525944]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Jan-2026 10:55:27.030) (total time: 13339ms):
Jan 21 10:55:40 crc kubenswrapper[4925]: Trace[70525944]: ---"Objects listed" error: 13338ms (10:55:40.369)
Jan 21 10:55:40 crc kubenswrapper[4925]: Trace[70525944]: [13.339015925s] [13.339015925s] END
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.369611 4925 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.369647 4925 trace.go:236] Trace[293737988]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Jan-2026 10:55:29.709) (total time: 10659ms):
Jan 21 10:55:40 crc kubenswrapper[4925]: Trace[293737988]: ---"Objects listed" error: 10659ms (10:55:40.369)
Jan 21 10:55:40 crc kubenswrapper[4925]: Trace[293737988]: [10.659710706s] [10.659710706s] END
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.369664 4925 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.383096 4925 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.609494 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 14:56:48.939335501 +0000 UTC
Jan 21 10:55:40 crc kubenswrapper[4925]: I0121 10:55:40.609563 4925 apiserver.go:52] "Watching apiserver"
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.296553 4925 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.297243 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.298033 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
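The Trace[...] blocks are client-go reflector instrumentation: each informer's initial List took 10-14 seconds before its cache could be marked populated. A sketch of the same pattern those factory.go:160 informers follow, assuming a kubeconfig path for illustration:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// The kubeconfig path here is illustrative, not taken from the log.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	nodeInformer := factory.Core().V1().Nodes().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	// Blocks until the initial List completes; the >13s "Objects listed"
	// durations in the traces above are spent in this phase, after which
	// the "Caches populated" entries appear.
	if !cache.WaitForCacheSync(stop, nodeInformer.HasSynced) {
		panic("cache never synced")
	}
	fmt.Println("caches populated")
}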
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.547376 4925 trace.go:236] Trace[290782554]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Jan-2026 10:55:28.014) (total time: 13283ms): Jan 21 10:55:41 crc kubenswrapper[4925]: Trace[290782554]: ---"Objects listed" error: 13283ms (10:55:41.297) Jan 21 10:55:41 crc kubenswrapper[4925]: Trace[290782554]: [13.283810919s] [13.283810919s] END Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.547500 4925 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.547689 4925 trace.go:236] Trace[1435776473]: "DeltaFIFO Pop Process" ID:default/kubernetes,Depth:52,Reason:slow event handlers blocking the queue (21-Jan-2026 10:55:41.297) (total time: 249ms): Jan 21 10:55:41 crc kubenswrapper[4925]: Trace[1435776473]: [249.648331ms] [249.648331ms] END Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.548984 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.549092 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.549135 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.549218 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.549294 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.549319 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.549443 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.549492 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
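All of the "Error syncing pod, skipping" entries share the single root cause stated in the message: no CNI configuration exists yet under /etc/kubernetes/cni/net.d/, so every pod sandbox is refused until the network provider writes one. A sketch of the same directory check, using the path from the log message:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // path from the log; /etc/cni/net.d is the common default elsewhere
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI dir:", err)
		return
	}
	found := false
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config:", e.Name())
			found = true
		}
	}
	if !found {
		// Corresponds to "no CNI configuration file in
		// /etc/kubernetes/cni/net.d/. Has your network provider started?"
		fmt.Println("no CNI configuration file found")
	}
}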
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.551229 4925 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38030->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.551294 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38030->192.168.126.11:17697: read: connection reset by peer" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.553344 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.553351 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.554108 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.554187 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.554269 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.554418 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.555178 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.556000 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.556324 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.588529 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.605165 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.609108 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.610166 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 22:06:35.37063703 +0000 UTC Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.613534 4925 desired_state_of_world_populator.go:154] 
"Finished populating initial desired state of world" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.615220 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.629493 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.630319 4925 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.630428 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.634082 4925 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.635342 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.646438 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.648903 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.648959 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649004 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649022 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649046 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649063 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 
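Every "Failed to update status for pod" entry above fails at the same final step: the API server must call the pod.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743/pod before accepting the status patch, and nothing is listening on that port yet. A sketch of the same reachability test the refused Post implies:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Address taken from the webhook error in the log entries above.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
	if err != nil {
		// Matches "dial tcp 127.0.0.1:9743: connect: connection refused":
		// the webhook server is not up, so every status patch is rejected.
		fmt.Println("webhook unreachable:", err)
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint is accepting connections")
}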
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649082 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649101 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649453 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649521 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649551 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649607 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649627 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649656 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649685 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649703 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649719 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649736 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649757 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649754 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649778 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649799 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649872 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649893 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649909 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649933 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649973 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.649992 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650009 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650025 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650041 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650061 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650077 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650093 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650109 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650153 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650170 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650185 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650237 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650257 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650276 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650269 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650304 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650334 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650419 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650452 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650479 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650512 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650540 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650567 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650568 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650612 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650638 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650643 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650664 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650690 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650714 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650776 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650800 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650820 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650841 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650844 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650862 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650893 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650924 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650956 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.650983 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651008 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651014 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651034 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651081 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651100 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651119 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651151 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651138 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651185 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651199 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651217 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651236 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651239 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651261 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651279 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651298 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651315 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651332 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651351 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: 
\"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651369 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651409 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651428 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651444 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651460 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651473 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651494 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651511 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651529 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651546 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651565 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651582 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651614 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651642 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651664 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: 
\"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651681 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651697 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651715 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651733 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651751 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651770 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651804 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651837 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651859 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651879 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") 
" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651899 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651916 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651932 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651961 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651978 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651998 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652017 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652081 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652103 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652162 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 10:55:41 crc 
kubenswrapper[4925]: I0121 10:55:41.652182 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652201 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652222 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652241 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652295 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652636 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652660 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652682 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652702 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652721 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") 
" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652741 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652762 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652782 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652950 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652976 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653003 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653026 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653050 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653072 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653096 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653119 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653189 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653214 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653237 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653293 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.653347 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.655306 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.655373 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681213 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681318 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681359 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681432 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681469 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684079 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684199 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684226 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684261 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684294 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684320 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684345 4925 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684371 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684425 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684456 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684495 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684517 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684538 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684562 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684581 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684602 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc 
kubenswrapper[4925]: I0121 10:55:41.684624 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684644 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684665 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684692 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684716 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684734 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684755 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684775 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684792 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684811 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684831 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684852 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684871 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684894 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684913 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684930 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684949 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684968 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684985 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685006 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685026 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685146 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685173 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685197 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685222 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685240 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685262 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685304 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685341 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 21 10:55:41 crc 
kubenswrapper[4925]: I0121 10:55:41.685365 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685421 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685461 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685495 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685527 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685560 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685579 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685599 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685619 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685639 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 
10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685716 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685766 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.685787 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.706307 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.706457 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.707987 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.708154 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.708267 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.708383 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.708515 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.708619 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.708720 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.708861 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.708973 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.709206 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.709289 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.709377 4925 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.709501 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.709650 4925 reconciler_common.go:293] "Volume 
detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.709748 4925 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.709843 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.709963 4925 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.710086 4925 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.710206 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.710492 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.710592 4925 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.710685 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.710785 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.763099 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.763536 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651469 4925 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651611 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651821 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651851 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.651874 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652097 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652352 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652546 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.652689 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.654290 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.654245 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.654573 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.654812 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.655242 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.655721 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.656669 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.656823 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.657094 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.657428 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.657868 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.657982 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.658442 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.658815 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.658878 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.659129 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.764101 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.764564 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.764716 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.764912 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.659287 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.659558 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.656004 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.660487 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.660886 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.661296 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.661569 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.663666 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.664107 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.663975 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.664747 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.665788 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.666915 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.667228 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.667560 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.668032 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.669217 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.669954 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.666036 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.680733 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.680806 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681291 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681328 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681419 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.656187 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681784 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681901 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681983 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.682016 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.682443 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.682503 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.682488 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.682567 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.682607 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.682785 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.683144 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.683169 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.683349 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.683645 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.683698 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.681543 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684035 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684050 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684188 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.684223 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.701900 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.702175 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.702463 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.702472 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.702574 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.702574 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.703144 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.703103 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.703076 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.705134 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.705444 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.705863 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.707836 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.761182 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.761199 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.761587 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.761617 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.761650 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.761767 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.761785 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.762038 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:55:42.262005949 +0000 UTC m=+33.865897883 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.762122 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.762145 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.762173 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.762250 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.762312 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.762496 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.762798 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.762854 4925 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.763078 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.763141 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.763414 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.763594 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.766571 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:42.266329093 +0000 UTC m=+33.870221027 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.766609 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:42.266597061 +0000 UTC m=+33.870488995 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.767698 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.767817 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.767946 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.768164 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.768789 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769002 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769235 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769275 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769390 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769387 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769642 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769708 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769908 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769973 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769992 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.769201 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.770361 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.770387 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.771085 4925 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.771105 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.771236 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.771364 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.771611 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772053 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772172 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772229 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772416 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772437 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772444 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772557 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772573 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772805 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772896 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.772924 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.773001 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.773123 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.773153 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.773119 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.773229 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.773306 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.773626 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.773896 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.774026 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.775993 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.776125 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.776191 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.776324 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.776405 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.776504 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.776586 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.776606 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.776871 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.777112 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.777150 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.778378 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.779009 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.779146 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.779166 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.779181 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.779245 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:42.279225459 +0000 UTC m=+33.883117383 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.779347 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.789601 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.790021 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.790932 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.799012 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.813976 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814129 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814318 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814335 4925 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814349 4925 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814362 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814374 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814386 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814416 4925 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 
10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814428 4925 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814440 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814451 4925 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814462 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814472 4925 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814483 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814494 4925 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814506 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814520 4925 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814532 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814542 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814553 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814564 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814575 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814586 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814598 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814609 4925 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814622 4925 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814633 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814643 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814653 4925 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814665 4925 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814676 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814686 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814696 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814707 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814718 4925 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814728 4925 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814738 4925 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814748 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814760 4925 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814771 4925 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814782 4925 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814792 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814802 4925 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814813 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814828 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814852 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814914 4925 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: 
I0121 10:55:41.814925 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814944 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814962 4925 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814974 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.814985 4925 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815017 4925 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815028 4925 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815041 4925 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815059 4925 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815075 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815095 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815106 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815122 4925 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc 
kubenswrapper[4925]: I0121 10:55:41.815134 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815146 4925 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815157 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815168 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815180 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815190 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815201 4925 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815212 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815224 4925 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815236 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815249 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815261 4925 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815274 4925 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815288 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815300 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815312 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815323 4925 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815333 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815352 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815369 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815380 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815390 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815418 4925 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815429 4925 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815439 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815450 4925 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815472 4925 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815487 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815505 4925 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815519 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815530 4925 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815544 4925 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815555 4925 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815571 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815588 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815600 4925 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815615 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815630 4925 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815640 4925 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" 
DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815651 4925 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815683 4925 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815694 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815705 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815714 4925 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815724 4925 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815735 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815744 4925 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815754 4925 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815764 4925 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815774 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815791 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815801 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815810 4925 reconciler_common.go:293] "Volume 
detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815820 4925 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815844 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815856 4925 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815867 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815878 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815888 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815899 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815911 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815922 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815932 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815956 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815968 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815986 4925 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.815997 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816007 4925 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816031 4925 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816040 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816051 4925 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816061 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816071 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816080 4925 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816092 4925 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816102 4925 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816115 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816125 4925 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816135 4925 reconciler_common.go:293] "Volume detached 
for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816145 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816155 4925 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816165 4925 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816179 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816190 4925 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816201 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816211 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816222 4925 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816231 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816242 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816253 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816265 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816276 4925 reconciler_common.go:293] "Volume detached for 
volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816287 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816298 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816309 4925 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816320 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816331 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816343 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816354 4925 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816365 4925 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816376 4925 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816637 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.816697 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 
10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.822988 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.903578 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.904486 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.904610 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.904666 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.904770 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.904884 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.905404 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.906836 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.907588 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.907642 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.907772 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.911259 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.916028 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.916146 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.916241 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.916467 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917774 4925 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917828 4925 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917844 4925 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917853 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917864 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917873 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917882 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917892 4925 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917901 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917911 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917920 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917929 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917939 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917946 4925 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917955 4925 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.917964 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.919268 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.921804 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.922552 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.922668 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.922751 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:55:41 crc kubenswrapper[4925]: E0121 10:55:41.922866 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:42.422844562 +0000 UTC m=+34.026736496 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.923026 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.925244 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.928880 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.928985 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.946109 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.948545 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.949307 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:41 crc kubenswrapper[4925]: W0121 10:55:41.949428 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-f9d9c38ccc5e3cae4af27a87fdd0cc0836162563dfe868421e900378425c56bc WatchSource:0}: Error finding container f9d9c38ccc5e3cae4af27a87fdd0cc0836162563dfe868421e900378425c56bc: Status 404 returned error can't find the container with id f9d9c38ccc5e3cae4af27a87fdd0cc0836162563dfe868421e900378425c56bc Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.955188 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.976948 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:41 crc kubenswrapper[4925]: I0121 10:55:41.994557 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.011865 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.018648 4925 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.018905 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.019017 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.019122 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.028361 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.045270 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.097701 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.107115 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.119724 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.132297 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.144931 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.195536 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.211022 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 21 10:55:42 crc kubenswrapper[4925]: W0121 10:55:42.255540 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-06152d34fa461753c26f7d423cd068879fa0adfc582f83df5efdfbf09bdd183b WatchSource:0}: Error finding container 06152d34fa461753c26f7d423cd068879fa0adfc582f83df5efdfbf09bdd183b: Status 404 returned error can't find the container with id 06152d34fa461753c26f7d423cd068879fa0adfc582f83df5efdfbf09bdd183b
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.282658 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"06152d34fa461753c26f7d423cd068879fa0adfc582f83df5efdfbf09bdd183b"}
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.285257 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"6cced5a1d0d7f2c06a4f63fcbe70c205db0af978b4b3193e4b24d7387faed286"}
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.286537 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"f9d9c38ccc5e3cae4af27a87fdd0cc0836162563dfe868421e900378425c56bc"}
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.288755 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.290810 4925 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30" exitCode=255
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.291555 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30"}
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.292236 4925 scope.go:117] "RemoveContainer" containerID="80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30"
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.321865 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.321970 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.322074 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:55:43.322041091 +0000 UTC m=+34.925933045 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.322182 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.322240 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.322763 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.322998 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:43.322947151 +0000 UTC m=+34.926839085 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.323096 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.323203 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.323319 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.323485 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:43.323461708 +0000 UTC m=+34.927353652 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.323166 4925 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.323724 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:43.323710836 +0000 UTC m=+34.927602780 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.330841 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21
T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.346779 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.359618 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.373563 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.388430 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.404893 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.424266 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.424353 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.424370 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:42 crc kubenswrapper[4925]: E0121 10:55:42.424509 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:43.424485411 +0000 UTC m=+35.028377345 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.424607 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.425071 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.476030 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.512483 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:42 crc kubenswrapper[4925]: I0121 10:55:42.660909 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 22:26:38.173275459 +0000 UTC Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.295469 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe"} Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.299139 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.301316 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d"} Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.302459 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0"} Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.313040 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.325576 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.342671 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.361622 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.370703 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.383761 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.393323 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.402744 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21
T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.414897 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.415086 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.415131 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: 
\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.415457 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.415493 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.415523 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.415549 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.415957 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:55:45.415290978 +0000 UTC m=+37.019182912 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.416031 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.416120 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:45.416088184 +0000 UTC m=+37.019980128 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.416143 4925 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.416170 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:45.416159296 +0000 UTC m=+37.020051240 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.416200 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:45.416188597 +0000 UTC m=+37.020080591 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.501175 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.501182 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.501183 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.501356 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.501459 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.504385 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.510996 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.511739 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.513456 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.514249 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.515591 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.516302 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.516849 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.516990 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.517014 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.517025 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:43 crc kubenswrapper[4925]: E0121 10:55:43.517077 4925 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:45.517062235 +0000 UTC m=+37.120954169 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.517266 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.518496 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.519321 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.520509 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.521149 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.522505 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.523208 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.523925 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.524994 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.525623 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.526739 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.527225 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.527938 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.529111 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.529686 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.530740 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.531215 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.533758 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.534682 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.535584 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.537199 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.537840 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.539036 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.539549 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.540619 4925 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.540734 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.542769 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.543899 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.544469 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.546158 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.546918 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.547905 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.548725 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.550011 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.551635 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.552760 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.553527 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.554579 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.555112 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.556137 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.556920 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.558521 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.559125 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.560250 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.561147 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.562192 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.563097 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.563789 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.661449 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 05:28:23.601245041 +0000 UTC Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.760165 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.777863 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.778243 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.780719 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.800164 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.818937 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21
T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.834010 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.850287 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1
220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.865471 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.878110 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.894514 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.911892 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.928929 4925 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.946244 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.962629 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:43 crc kubenswrapper[4925]: I0121 10:55:43.987527 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:43Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.145407 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.174471 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.189369 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.209920 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21
T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.308228 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6"} Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.309088 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.326053 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.338972 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.354435 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.366616 4925 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.392203 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.411246 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.430150 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.448425 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21
T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.472986 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.490451 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.507471 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.527721 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.549034 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.572550 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.587219 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.605484 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.661406 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.661662 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 08:20:58.699901696 +0000 UTC Jan 21 10:55:44 crc kubenswrapper[4925]: I0121 10:55:44.678070 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:44Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.498020 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.498088 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.498124 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.498160 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.498255 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.498333 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:49.498312046 +0000 UTC m=+41.102204010 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.498884 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:55:49.498873125 +0000 UTC m=+41.102765059 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.498960 4925 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.498989 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:49.498980998 +0000 UTC m=+41.102872932 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.499021 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.499056 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.499073 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.499140 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:49.499118163 +0000 UTC m=+41.103010127 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.503346 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.503526 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.503603 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.503665 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.503731 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.503776 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.598670 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.598850 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.598874 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.598886 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:45 crc kubenswrapper[4925]: E0121 10:55:45.598935 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:49.598918106 +0000 UTC m=+41.202810040 (durationBeforeRetry 4s). 
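Note: the "object ... not registered" failures above do not mean the ConfigMaps and Secrets are missing from the API server; they mean the kubelet-side secret/configmap manager has not yet registered a watch for those objects after the restart (the later "Caches populated for *v1.ConfigMap ..." lines show the same mechanism recovering for openshift-dns). The UnmountVolume.TearDown failure is the same start-up race: kubevirt.io.hostpath-provisioner only joins the registered-drivers list once its plugin re-registers with the kubelet. Meanwhile nestedpendingoperations.go parks each failed operation with a growing durationBeforeRetry; the 4s seen here is consistent with a few doublings from a sub-second start, though the exact constants are kubelet internals. A stdlib-only sketch of that retry shape, with illustrative constants rather than kubelet's actual values:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retryWithBackoff mirrors the shape of kubelet's per-volume retry logic:
    // each failure parks the operation and doubles the wait (the log's
    // "durationBeforeRetry") up to a cap.
    func retryWithBackoff(op func() error) error {
        delay := 500 * time.Millisecond
        const maxDelay = 2 * time.Minute
        for attempt := 1; attempt <= 8; attempt++ {
            err := op()
            if err == nil {
                return nil
            }
            fmt.Printf("attempt %d failed (%v); durationBeforeRetry %s\n", attempt, err, delay)
            time.Sleep(delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
        return errors.New("retries exhausted")
    }

    func main() {
        calls := 0
        _ = retryWithBackoff(func() error {
            calls++
            if calls < 4 { // succeeds once the object cache is populated
                return errors.New(`object "openshift-network-console"/"nginx-conf" not registered`)
            }
            return nil
        })
    }
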
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:45 crc kubenswrapper[4925]: I0121 10:55:45.662489 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 19:46:49.849359454 +0000 UTC Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.317550 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61"} Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.357747 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/stat
ic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.457348 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.864760 4925 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.867459 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 18:58:51.501478941 +0000 UTC Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.898779 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.898830 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.898844 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.898919 4925 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.974829 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.988314 4925 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.989316 4925 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.992114 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:46 crc 
kubenswrapper[4925]: I0121 10:55:46.992151 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.992162 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.992179 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:46 crc kubenswrapper[4925]: I0121 10:55:46.992194 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:46Z","lastTransitionTime":"2026-01-21T10:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.162604 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.178838 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.193342 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-rzmbp"] Jan 21 10:55:47 crc 
kubenswrapper[4925]: I0121 10:55:47.193941 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-rzmbp" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.196029 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.198202 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.198602 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.201595 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 
10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.204599 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.211514 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.211563 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.211576 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.211594 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.211615 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.216914 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb96t\" (UniqueName: \"kubernetes.io/projected/ae658aae-64a2-4df8-938c-7a4c2a35655f-kube-api-access-hb96t\") pod \"node-resolver-rzmbp\" (UID: \"ae658aae-64a2-4df8-938c-7a4c2a35655f\") " pod="openshift-dns/node-resolver-rzmbp" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.217180 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/ae658aae-64a2-4df8-938c-7a4c2a35655f-hosts-file\") pod \"node-resolver-rzmbp\" (UID: \"ae658aae-64a2-4df8-938c-7a4c2a35655f\") " pod="openshift-dns/node-resolver-rzmbp" Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.227461 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.232569 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.232605 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.232614 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.232644 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.232654 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.243183 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Complet
ed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.253622 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.259745 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.259783 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.259795 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.259811 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.259820 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.260302 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.272909 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.277255 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.277302 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.277315 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.277331 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.277344 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.282920 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.293191 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.293447 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.295185 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.295247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.295262 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.295278 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.295289 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.305437 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.317805 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb96t\" (UniqueName: \"kubernetes.io/projected/ae658aae-64a2-4df8-938c-7a4c2a35655f-kube-api-access-hb96t\") pod \"node-resolver-rzmbp\" (UID: \"ae658aae-64a2-4df8-938c-7a4c2a35655f\") " pod="openshift-dns/node-resolver-rzmbp" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.317875 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/ae658aae-64a2-4df8-938c-7a4c2a35655f-hosts-file\") pod \"node-resolver-rzmbp\" (UID: \"ae658aae-64a2-4df8-938c-7a4c2a35655f\") " pod="openshift-dns/node-resolver-rzmbp" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 
10:55:47.317998 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/ae658aae-64a2-4df8-938c-7a4c2a35655f-hosts-file\") pod \"node-resolver-rzmbp\" (UID: \"ae658aae-64a2-4df8-938c-7a4c2a35655f\") " pod="openshift-dns/node-resolver-rzmbp" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.322151 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.343415 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb96t\" (UniqueName: \"kubernetes.io/projected/ae658aae-64a2-4df8-938c-7a4c2a35655f-kube-api-access-hb96t\") pod \"node-resolver-rzmbp\" (UID: \"ae658aae-64a2-4df8-938c-7a4c2a35655f\") " pod="openshift-dns/node-resolver-rzmbp" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.346120 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.361889 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.387424 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.398170 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.398214 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.398224 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.398238 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.398249 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.403612 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.419036 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.436064 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.450535 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.478347 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:47Z is after 2025-08-24T17:21:41Z"
Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.503031 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.503167 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.503636 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.503695 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.503741 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.503781 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.504717 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.504756 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.504774 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.504790 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.504802 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
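[Editor's note] The setters.go records above flip the node's Ready condition to False because nothing in /etc/kubernetes/cni/net.d/ looks like a CNI config yet. As a rough illustration only (not the kubelet's actual implementation; the directory and file extensions are taken from the log message), an equivalent check can be sketched in Go:

// cni_check.go - minimal sketch of the readiness complaint logged above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
	var found []string
	// Common CNI config extensions; assumed here for illustration.
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, _ := filepath.Glob(filepath.Join(confDir, pattern))
		found = append(found, matches...)
	}
	if len(found) == 0 {
		// Mirrors the NetworkPluginNotReady condition the kubelet reports.
		fmt.Fprintf(os.Stderr, "no CNI configuration file in %s. Has your network provider started?\n", confDir)
		os.Exit(1)
	}
	fmt.Println("CNI config present:", found)
}

Once the network operator (here, OVN-Kubernetes via multus) writes its conflist into that directory, the same check passes and the Ready condition can transition back to True.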
Need to start a new one" pod="openshift-dns/node-resolver-rzmbp" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.616546 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.616594 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.616605 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.616639 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.616652 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: W0121 10:55:47.620652 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae658aae_64a2_4df8_938c_7a4c2a35655f.slice/crio-00c37898628ae36631c3f72acfccf3e201595b5634bedffaeb21b9bcd25e4b58 WatchSource:0}: Error finding container 00c37898628ae36631c3f72acfccf3e201595b5634bedffaeb21b9bcd25e4b58: Status 404 returned error can't find the container with id 00c37898628ae36631c3f72acfccf3e201595b5634bedffaeb21b9bcd25e4b58 Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.725015 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-hwzqb"] Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.725555 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-hwzqb" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.727366 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-rzs4q"] Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.727900 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.734810 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.734861 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.734870 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.734890 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.734900 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: W0121 10:55:47.837590 4925 reflector.go:561] object-"openshift-machine-config-operator"/"proxy-tls": failed to list *v1.Secret: secrets "proxy-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.839592 4925 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"proxy-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"proxy-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 10:55:47 crc kubenswrapper[4925]: W0121 10:55:47.837613 4925 reflector.go:561] object-"openshift-multus"/"default-dockercfg-2q5b6": failed to list *v1.Secret: secrets "default-dockercfg-2q5b6" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.839661 4925 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"default-dockercfg-2q5b6\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"default-dockercfg-2q5b6\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 10:55:47 crc kubenswrapper[4925]: W0121 10:55:47.837662 4925 reflector.go:561] object-"openshift-machine-config-operator"/"kube-rbac-proxy": failed to list *v1.ConfigMap: configmaps "kube-rbac-proxy" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.839687 4925 reflector.go:158] "Unhandled Error" 
err="object-\"openshift-machine-config-operator\"/\"kube-rbac-proxy\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-rbac-proxy\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 10:55:47 crc kubenswrapper[4925]: W0121 10:55:47.837837 4925 reflector.go:561] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": failed to list *v1.Secret: secrets "machine-config-daemon-dockercfg-r5tcq" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Jan 21 10:55:47 crc kubenswrapper[4925]: E0121 10:55:47.839717 4925 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"machine-config-daemon-dockercfg-r5tcq\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-config-daemon-dockercfg-r5tcq\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.837858 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.840098 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.840121 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.840133 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.840149 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.840161 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:47Z","lastTransitionTime":"2026-01-21T10:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:47 crc kubenswrapper[4925]: I0121 10:55:47.867967 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 23:44:00.683145207 +0000 UTC Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932039 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-k8s-cni-cncf-io\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932101 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-conf-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932127 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46dvd\" (UniqueName: \"kubernetes.io/projected/f21c81eb-6979-46c3-9594-e4916d36fb0a-kube-api-access-46dvd\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932152 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dccvf\" (UniqueName: \"kubernetes.io/projected/82b678c3-b1e1-4294-9f9f-02103a6823cc-kube-api-access-dccvf\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932167 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f21c81eb-6979-46c3-9594-e4916d36fb0a-proxy-tls\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932186 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f21c81eb-6979-46c3-9594-e4916d36fb0a-rootfs\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932210 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-socket-dir-parent\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932226 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-cni-bin\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 
10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932280 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-cni-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932317 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-cnibin\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932334 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/82b678c3-b1e1-4294-9f9f-02103a6823cc-cni-binary-copy\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932349 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-cni-multus\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932367 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-hostroot\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932385 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-multus-certs\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932423 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-etc-kubernetes\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932450 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-system-cni-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932466 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-netns\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932483 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-kubelet\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932517 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-os-release\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932534 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-daemon-config\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:47.932553 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f21c81eb-6979-46c3-9594-e4916d36fb0a-mcd-auth-proxy-config\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033279 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-cni-bin\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033498 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-socket-dir-parent\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033571 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-cni-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033651 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-cnibin\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033693 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/82b678c3-b1e1-4294-9f9f-02103a6823cc-cni-binary-copy\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033740 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-cni-multus\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033770 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-hostroot\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033857 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-multus-certs\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033926 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-etc-kubernetes\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.033988 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-system-cni-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034034 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-netns\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034056 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-kubelet\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034096 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f21c81eb-6979-46c3-9594-e4916d36fb0a-mcd-auth-proxy-config\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034147 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-os-release\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034168 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-daemon-config\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" 
Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034198 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-k8s-cni-cncf-io\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034218 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-conf-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034240 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46dvd\" (UniqueName: \"kubernetes.io/projected/f21c81eb-6979-46c3-9594-e4916d36fb0a-kube-api-access-46dvd\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034264 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dccvf\" (UniqueName: \"kubernetes.io/projected/82b678c3-b1e1-4294-9f9f-02103a6823cc-kube-api-access-dccvf\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034285 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f21c81eb-6979-46c3-9594-e4916d36fb0a-proxy-tls\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034311 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f21c81eb-6979-46c3-9594-e4916d36fb0a-rootfs\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034443 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f21c81eb-6979-46c3-9594-e4916d36fb0a-rootfs\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034506 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-cni-bin\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034785 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-socket-dir-parent\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.034920 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-netns\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035229 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-cni-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035287 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-hostroot\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035368 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-cnibin\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035510 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-cni-multus\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035540 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-etc-kubernetes\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035562 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-multus-certs\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035650 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-system-cni-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035676 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-run-k8s-cni-cncf-io\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.035700 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-host-var-lib-kubelet\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc 
kubenswrapper[4925]: I0121 10:55:48.035766 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-os-release\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.036281 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-conf-dir\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.072526 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.072586 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.072601 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.072623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.072640 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.079700 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.098968 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.103624 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.103820 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.103962 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.108201 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/82b678c3-b1e1-4294-9f9f-02103a6823cc-multus-daemon-config\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.108318 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/82b678c3-b1e1-4294-9f9f-02103a6823cc-cni-binary-copy\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.136835 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dccvf\" (UniqueName: \"kubernetes.io/projected/82b678c3-b1e1-4294-9f9f-02103a6823cc-kube-api-access-dccvf\") pod \"multus-hwzqb\" (UID: \"82b678c3-b1e1-4294-9f9f-02103a6823cc\") " pod="openshift-multus/multus-hwzqb" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.142145 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46dvd\" (UniqueName: \"kubernetes.io/projected/f21c81eb-6979-46c3-9594-e4916d36fb0a-kube-api-access-46dvd\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.147582 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.172057 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.175984 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.176053 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.176079 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.176127 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.176147 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.199955 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.221982 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.250353 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.271254 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.278690 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.278742 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.278756 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.278773 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.278786 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.301651 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.325241 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rzmbp" event={"ID":"ae658aae-64a2-4df8-938c-7a4c2a35655f","Type":"ContainerStarted","Data":"00c37898628ae36631c3f72acfccf3e201595b5634bedffaeb21b9bcd25e4b58"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.331751 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.348936 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.379342 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.381046 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.381091 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.381105 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.381124 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.381139 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.405861 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.433295 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.449945 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.462606 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.476384 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.484193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.484255 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.484271 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.484291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.484303 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.489054 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9hk9g"] Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.490150 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.491118 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-pbw2x"] Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.491867 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.492069 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.492774 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.492873 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.492873 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.492822 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.492884 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.493076 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.493574 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.494047 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.499612 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.519567 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.533410 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540463 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540522 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2b0b25f1-8430-459d-9805-e667615dc073-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540554 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-kubelet\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540577 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540600 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-config\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540640 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-ovn-kubernetes\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540661 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-os-release\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540682 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjd7c\" (UniqueName: \"kubernetes.io/projected/3a976857-73df-49d9-9b7e-b5cb3d250a5f-kube-api-access-cjd7c\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540713 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-netns\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540734 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-var-lib-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540752 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-etc-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540797 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-env-overrides\") pod \"ovnkube-node-9hk9g\" (UID: 
\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540820 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-cnibin\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540846 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-node-log\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540869 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-script-lib\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540953 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-system-cni-dir\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.540978 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-ovn\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541002 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-netd\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541024 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-slash\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541045 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-log-socket\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541066 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-bin\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541086 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-tuning-conf-dir\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541119 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-systemd-units\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541142 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-systemd\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541163 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovn-node-metrics-cert\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541184 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2b0b25f1-8430-459d-9805-e667615dc073-cni-binary-copy\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.541203 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-928hc\" (UniqueName: \"kubernetes.io/projected/2b0b25f1-8430-459d-9805-e667615dc073-kube-api-access-928hc\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.548549 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.566800 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.587066 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.587372 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.587511 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.587627 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.587716 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.603413 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.627747 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745478 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-systemd-units\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745539 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-systemd\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745583 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovn-node-metrics-cert\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745623 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2b0b25f1-8430-459d-9805-e667615dc073-cni-binary-copy\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745629 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-systemd-units\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745695 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-systemd\") pod 
\"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745660 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-928hc\" (UniqueName: \"kubernetes.io/projected/2b0b25f1-8430-459d-9805-e667615dc073-kube-api-access-928hc\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745790 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-kubelet\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745818 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745857 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745886 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2b0b25f1-8430-459d-9805-e667615dc073-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745942 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-config\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745964 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-ovn-kubernetes\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745983 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjd7c\" (UniqueName: \"kubernetes.io/projected/3a976857-73df-49d9-9b7e-b5cb3d250a5f-kube-api-access-cjd7c\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.745998 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-os-release\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746013 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-etc-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746046 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-netns\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746166 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-var-lib-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746189 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-env-overrides\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746210 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-node-log\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746226 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-script-lib\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746244 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-cnibin\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746307 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-system-cni-dir\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746328 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-ovn\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746349 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-netd\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746378 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-bin\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746421 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-tuning-conf-dir\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746449 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-slash\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746464 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-log-socket\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746526 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-log-socket\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746686 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2b0b25f1-8430-459d-9805-e667615dc073-cni-binary-copy\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746751 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746778 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-kubelet\") pod \"ovnkube-node-9hk9g\" (UID: 
\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746808 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746833 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-node-log\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.746858 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-ovn-kubernetes\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747131 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-os-release\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747176 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-etc-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747212 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-netns\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747242 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-var-lib-openvswitch\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747350 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-config\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747519 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-netd\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747554 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-cnibin\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747581 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-ovn\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747604 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-slash\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747601 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-system-cni-dir\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747636 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-bin\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747719 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-env-overrides\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.747975 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2b0b25f1-8430-459d-9805-e667615dc073-tuning-conf-dir\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.748107 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-script-lib\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.749250 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2b0b25f1-8430-459d-9805-e667615dc073-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc 
kubenswrapper[4925]: I0121 10:55:48.749976 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovn-node-metrics-cert\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.750023 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.750205 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.750303 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.750420 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.750539 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.762750 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.773948 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-928hc\" (UniqueName: \"kubernetes.io/projected/2b0b25f1-8430-459d-9805-e667615dc073-kube-api-access-928hc\") pod \"multus-additional-cni-plugins-pbw2x\" (UID: \"2b0b25f1-8430-459d-9805-e667615dc073\") " pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.785001 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.793126 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjd7c\" (UniqueName: \"kubernetes.io/projected/3a976857-73df-49d9-9b7e-b5cb3d250a5f-kube-api-access-cjd7c\") pod \"ovnkube-node-9hk9g\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.797772 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.804297 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.807966 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f21c81eb-6979-46c3-9594-e4916d36fb0a-mcd-auth-proxy-config\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.811434 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.815769 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 
2025-08-24T17:21:41Z" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.852388 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.852439 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.852450 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.852467 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.852477 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.868552 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 01:28:56.681142358 +0000 UTC Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.955031 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.955077 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.955087 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.955105 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.955117 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:48Z","lastTransitionTime":"2026-01-21T10:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:48 crc kubenswrapper[4925]: I0121 10:55:48.996055 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:48Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.039022 4925 secret.go:188] Couldn't get secret openshift-machine-config-operator/proxy-tls: failed to sync secret cache: timed out waiting for the condition Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.039170 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f21c81eb-6979-46c3-9594-e4916d36fb0a-proxy-tls podName:f21c81eb-6979-46c3-9594-e4916d36fb0a nodeName:}" failed. No retries permitted until 2026-01-21 10:55:49.539132466 +0000 UTC m=+41.143024400 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/f21c81eb-6979-46c3-9594-e4916d36fb0a-proxy-tls") pod "machine-config-daemon-rzs4q" (UID: "f21c81eb-6979-46c3-9594-e4916d36fb0a") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.045430 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.059923 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.059977 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.059990 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.060011 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.060036 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:49Z","lastTransitionTime":"2026-01-21T10:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.065540 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.083631 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.098988 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.113136 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.133335 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.149822 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.153660 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.162116 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.162168 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.162182 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.162198 4925 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.162208 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:49Z","lastTransitionTime":"2026-01-21T10:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.172125 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.192154 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.206329 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.217878 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.265210 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.265269 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.265284 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.265305 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.265318 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:49Z","lastTransitionTime":"2026-01-21T10:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.285941 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.329731 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerStarted","Data":"a8b482aba840990ca385b9cdb4c7cbd17b57d312989f69e0715bb56df6ab41ea"} Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.331156 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rzmbp" event={"ID":"ae658aae-64a2-4df8-938c-7a4c2a35655f","Type":"ContainerStarted","Data":"8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f"} Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.332061 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"1bcbdfb70b58bca9fbbc0f2d4d5705d025f9e4b48e99d9c0511d73ac2ead5ce3"} Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.348193 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.367461 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.367527 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.367538 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.367556 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.367568 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:49Z","lastTransitionTime":"2026-01-21T10:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.367981 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.384434 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.387561 4925 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openshift-multus/multus-hwzqb" secret="" err="failed to sync secret cache: timed out waiting for the condition" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.387629 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-hwzqb"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.398415 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.784774 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.784968 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.785064 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.785298 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.785714 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.785811 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.791376 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.791724 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.791787 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.791905 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f21c81eb-6979-46c3-9594-e4916d36fb0a-proxy-tls\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.791961 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.792010 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.800817 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.800914 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:57.800891273 +0000 UTC m=+49.404783207 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.801008 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:55:57.800999266 +0000 UTC m=+49.404891200 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.801094 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.801105 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.801116 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.801147 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:57.801139951 +0000 UTC m=+49.405031885 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.801763 4925 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.801830 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:57.801812833 +0000 UTC m=+49.405704767 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.802168 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.802188 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.802201 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:55:49 crc kubenswrapper[4925]: E0121 10:55:49.802237 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:55:57.802225786 +0000 UTC m=+49.406117721 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.802483 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.807095 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.807135 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.807147 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.807174 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.807193 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:49Z","lastTransitionTime":"2026-01-21T10:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.807214 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.815342 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f21c81eb-6979-46c3-9594-e4916d36fb0a-proxy-tls\") pod \"machine-config-daemon-rzs4q\" (UID: \"f21c81eb-6979-46c3-9594-e4916d36fb0a\") " pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.829133 4925 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.839388 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.861952 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.869210 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 17:51:19.964619519 +0000 UTC Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.875890 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.887676 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.900586 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.909649 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.909691 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.909703 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.909720 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.909731 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:49Z","lastTransitionTime":"2026-01-21T10:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.920299 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.932819 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.939983 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.945518 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.960406 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.973584 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.988182 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:49 crc kubenswrapper[4925]: I0121 10:55:49.998992 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.012620 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.012669 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.012681 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.012696 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.012707 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.018725 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\
":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.037746 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5
646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.049455 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.060672 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.072158 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.088631 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.100571 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.216286 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.216325 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.216335 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.216351 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.216433 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.216725 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.237869 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.253877 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:50Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.320021 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.320051 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:50 crc kubenswrapper[4925]: W0121 10:55:50.320021 4925 manager.go:1169] Failed to process 
watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf21c81eb_6979_46c3_9594_e4916d36fb0a.slice/crio-f0bf1d34e1fe23c14b5b22b0d3ec18b97580329903c6cbd85ca294c179fbb49f WatchSource:0}: Error finding container f0bf1d34e1fe23c14b5b22b0d3ec18b97580329903c6cbd85ca294c179fbb49f: Status 404 returned error can't find the container with id f0bf1d34e1fe23c14b5b22b0d3ec18b97580329903c6cbd85ca294c179fbb49f
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.320061    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.320085    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.320097    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.331188    4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.342195    4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"f0bf1d34e1fe23c14b5b22b0d3ec18b97580329903c6cbd85ca294c179fbb49f"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.344742    4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hwzqb" event={"ID":"82b678c3-b1e1-4294-9f9f-02103a6823cc","Type":"ContainerStarted","Data":"f72bafa88cd8fe16cf880700f1d2b7a635a966889bd9e62797438fc6ad5a9f21"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.352823    4925 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.367108    4925 csr.go:261] certificate signing request csr-2w2cz is approved, waiting to be issued
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.422592    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.422637    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.422649    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.422670    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.422682    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.524878    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.524923    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.524938    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.524956    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.524968    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.627575    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.627625    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.627636    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.627655    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.627666    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.730023    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.730060    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.730069    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.730082    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.730092    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.832348    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.832383    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.832419    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.832442    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.832454    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.870657    4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 19:56:00.003479818 +0000 UTC
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.935231    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.935281    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.935293    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.935312    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.935323    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:50Z","lastTransitionTime":"2026-01-21T10:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:50 crc kubenswrapper[4925]: I0121 10:55:50.967419    4925 csr.go:257] certificate signing request csr-2w2cz is issued
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.037621    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.037678    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.037693    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.037714    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.037759    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:51Z","lastTransitionTime":"2026-01-21T10:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.172183    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.172215    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.172225    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.172241    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.172252    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:51Z","lastTransitionTime":"2026-01-21T10:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.275489    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.275537    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.275549    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.275568    4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.275580    4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:51Z","lastTransitionTime":"2026-01-21T10:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.349453    4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hwzqb" event={"ID":"82b678c3-b1e1-4294-9f9f-02103a6823cc","Type":"ContainerStarted","Data":"7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6"}
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.356897    4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3" exitCode=0
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.357366    4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"}
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.362332    4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603"}
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.365324    4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerStarted","Data":"5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff"}
Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.371554    4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/
var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"contain
erID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.382691 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.382753 4925 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.382765 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.382785 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.382797 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:51Z","lastTransitionTime":"2026-01-21T10:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.387610 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.411604 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.481832 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.487168 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.487205 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.487214 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.487232 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.487247 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:51Z","lastTransitionTime":"2026-01-21T10:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.501018 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.501097 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:51 crc kubenswrapper[4925]: E0121 10:55:51.501175 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:55:51 crc kubenswrapper[4925]: E0121 10:55:51.501331 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.501387 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:51 crc kubenswrapper[4925]: E0121 10:55:51.501635 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.506609 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f3
6cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\
\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostI
P\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.701281 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6
de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.702126 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.702174 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.702184 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.702206 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:51 crc 
kubenswrapper[4925]: I0121 10:55:51.702216 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:51Z","lastTransitionTime":"2026-01-21T10:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.723104 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.746487 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.760273 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.773895 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.866475 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.866531 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.866545 4925 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.866573 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.866583 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:51Z","lastTransitionTime":"2026-01-21T10:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.871791 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 23:06:32.627813498 +0000 UTC Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.878758 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.892867 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.907606 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.921204 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.936875 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.955650 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.968307 4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-21 10:50:50 +0000 UTC, rotation deadline is 2026-10-26 09:31:56.740012454 +0000 UTC Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.968372 4925 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6670h36m4.771645248s for next certificate rotation Jan 21 10:55:51 crc 
kubenswrapper[4925]: I0121 10:55:51.970642 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.970699 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.970740 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.970765 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.970779 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:51Z","lastTransitionTime":"2026-01-21T10:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:51 crc kubenswrapper[4925]: I0121 10:55:51.975903 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.005432 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:51Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.035581 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.053000 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.071911 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.075466 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.075542 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.075553 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.075568 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.075580 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:52Z","lastTransitionTime":"2026-01-21T10:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.087285 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.107723 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z 
is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.139062 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.269899 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.269942 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.269954 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.269880 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.269975 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.270158 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:52Z","lastTransitionTime":"2026-01-21T10:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.300193 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.328862 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.353073 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.377580 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" 
event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.380091 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153"} Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.398361 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.398424 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.398437 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.398455 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.398468 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:52Z","lastTransitionTime":"2026-01-21T10:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.401939 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.414720 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.429498 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.500356 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.501924 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.501967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.501978 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.502007 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.502022 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:52Z","lastTransitionTime":"2026-01-21T10:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.519891 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":tru
e,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.535353 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.566522 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.689655 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 
10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.689705 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.689714 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.689730 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.689739 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:52Z","lastTransitionTime":"2026-01-21T10:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.690298 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.721812 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.766723 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.789379 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.793315 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.793366 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.793380 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.793424 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.793440 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:52Z","lastTransitionTime":"2026-01-21T10:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.809927 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.829901 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.852412 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:52Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.873025 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 16:07:19.624531773 +0000 UTC Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.895887 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.895936 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.895950 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.895968 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:52 crc kubenswrapper[4925]: I0121 10:55:52.895982 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:52Z","lastTransitionTime":"2026-01-21T10:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:52.999422 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:52.999470 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:52.999482 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:52.999497 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:52.999506 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:52Z","lastTransitionTime":"2026-01-21T10:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.102349 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.102380 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.102389 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.102429 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.102439 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.205002 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.205061 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.205075 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.205094 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.205106 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.347426 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.347472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.347485 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.347504 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.347523 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.388627 4925 generic.go:334] "Generic (PLEG): container finished" podID="2b0b25f1-8430-459d-9805-e667615dc073" containerID="5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff" exitCode=0 Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.388715 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerDied","Data":"5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.397794 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.397839 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.397854 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.419548 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.431060 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.443074 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.450020 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.450050 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.450059 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.450072 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.450082 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.457639 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.477824 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z 
is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.494292 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.502805 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.502945 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.507084 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:53 crc kubenswrapper[4925]: E0121 10:55:53.507380 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:55:53 crc kubenswrapper[4925]: E0121 10:55:53.507602 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:55:53 crc kubenswrapper[4925]: E0121 10:55:53.508073 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.517779 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.561817 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.562210 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.562226 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.562233 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.562246 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.562254 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.577084 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.595597 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc 
kubenswrapper[4925]: I0121 10:55:53.633115 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.647202 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.660937 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.664848 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.664881 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.664893 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.664910 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.664922 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.674629 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.767666 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.767722 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.767736 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.767757 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.767772 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.889763 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 06:58:14.516102327 +0000 UTC Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.892297 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.892320 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.892328 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.892342 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.892352 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.912215 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-jqsxs"] Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.912722 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.916183 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.916477 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.916721 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.916884 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.954441 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\
\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.971824 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.991139 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:53Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.995485 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.995540 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.995552 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.995572 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:53 crc kubenswrapper[4925]: I0121 10:55:53.995585 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:53Z","lastTransitionTime":"2026-01-21T10:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.002484 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4e986f25-2ad4-428d-b6a5-f99e1a480285-host\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.002578 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4e986f25-2ad4-428d-b6a5-f99e1a480285-serviceca\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.002832 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flgnv\" (UniqueName: \"kubernetes.io/projected/4e986f25-2ad4-428d-b6a5-f99e1a480285-kube-api-access-flgnv\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.007211 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.027577 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc 
kubenswrapper[4925]: I0121 10:55:54.040925 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.054361 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.072227 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.085179 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.096999 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.098655 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.098696 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.098708 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.098725 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.098736 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.104169 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4e986f25-2ad4-428d-b6a5-f99e1a480285-host\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.104222 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4e986f25-2ad4-428d-b6a5-f99e1a480285-serviceca\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.104264 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flgnv\" (UniqueName: \"kubernetes.io/projected/4e986f25-2ad4-428d-b6a5-f99e1a480285-kube-api-access-flgnv\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.104308 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4e986f25-2ad4-428d-b6a5-f99e1a480285-host\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.105408 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4e986f25-2ad4-428d-b6a5-f99e1a480285-serviceca\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.116253 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z 
is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.134746 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flgnv\" (UniqueName: \"kubernetes.io/projected/4e986f25-2ad4-428d-b6a5-f99e1a480285-kube-api-access-flgnv\") pod \"node-ca-jqsxs\" (UID: \"4e986f25-2ad4-428d-b6a5-f99e1a480285\") " pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.140766 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.156037 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.169445 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.186698 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.203478 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.203536 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.203547 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.203564 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.203580 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.226687 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-jqsxs" Jan 21 10:55:54 crc kubenswrapper[4925]: W0121 10:55:54.241648 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e986f25_2ad4_428d_b6a5_f99e1a480285.slice/crio-d763d0e43c29bf6df12d2240b1f92042b090c2ea79898bf4498302934db53a8f WatchSource:0}: Error finding container d763d0e43c29bf6df12d2240b1f92042b090c2ea79898bf4498302934db53a8f: Status 404 returned error can't find the container with id d763d0e43c29bf6df12d2240b1f92042b090c2ea79898bf4498302934db53a8f Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.309990 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.310027 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.310039 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.310057 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.310068 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.406446 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerStarted","Data":"fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.407384 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-jqsxs" event={"ID":"4e986f25-2ad4-428d-b6a5-f99e1a480285","Type":"ContainerStarted","Data":"d763d0e43c29bf6df12d2240b1f92042b090c2ea79898bf4498302934db53a8f"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.417465 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.417506 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.417515 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.417532 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.417544 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.418980 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.419042 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.435840 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-
cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.452810 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.470853 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.485767 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.502679 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.514251 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.532166 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.534864 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.534892 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.534903 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.534917 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.534927 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.548231 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.563110 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.574255 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.612183 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.637805 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.637845 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.637855 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.637874 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.637884 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.639043 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.662294 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.677590 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.706926 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:54Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.747328 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.747377 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.747405 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.747426 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.747439 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.858119 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.858438 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.858642 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.858753 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.858857 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.928248 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 21:16:16.717652062 +0000 UTC
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.962611 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.962681 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.962697 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.962720 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:54 crc kubenswrapper[4925]: I0121 10:55:54.962735 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:54Z","lastTransitionTime":"2026-01-21T10:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.068183 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.068241 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.068253 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.068276 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.068292 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.173231 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.173778 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.173793 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.173812 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.173824 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.277044 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.277082 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.277091 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.277112 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.277124 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.379768 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.379832 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.379848 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.379870 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.379882 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.434830 4925 generic.go:334] "Generic (PLEG): container finished" podID="2b0b25f1-8430-459d-9805-e667615dc073" containerID="fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3" exitCode=0
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.434913 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerDied","Data":"fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3"}
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.439000 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-jqsxs" event={"ID":"4e986f25-2ad4-428d-b6a5-f99e1a480285","Type":"ContainerStarted","Data":"06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69"}
Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.455133 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.484274 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.487171 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.487239 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.487255 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.487274 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.487287 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.501917 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:55 crc kubenswrapper[4925]: E0121 10:55:55.502093 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.502719 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.502784 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:55 crc kubenswrapper[4925]: E0121 10:55:55.502855 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:55:55 crc kubenswrapper[4925]: E0121 10:55:55.502927 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.507758 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o:
//911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7
fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.521055 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.536443 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.549778 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.562998 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.579002 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.592311 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.592371 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.592385 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.592427 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.592441 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.594598 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.608144 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.622969 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.640229 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.654483 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.669848 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.682075 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.696822 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.696864 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.696873 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.696888 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.696899 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.703916 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-2
1T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e94017
55b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.720042 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.737977 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.753363 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.774130 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.788800 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.801503 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.801565 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.801576 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.801596 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.801609 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.806826 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.821653 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.836492 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.859917 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.880017 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.898671 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.904077 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.904121 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.904149 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.904166 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.904176 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:55Z","lastTransitionTime":"2026-01-21T10:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.917359 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.928838 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 
10:45:54.063253772 +0000 UTC Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.932061 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:55 crc kubenswrapper[4925]: I0121 10:55:55.954288 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:55Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.007201 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.007708 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.007719 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.007740 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.007750 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.110500 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.110550 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.110561 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.110578 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.110590 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.292002 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.292058 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.292068 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.292087 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.292100 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.396247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.396299 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.396311 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.396331 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.396353 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.499725 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.499769 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.499779 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.499796 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.499807 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.506219 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.509715 4925 generic.go:334] "Generic (PLEG): container finished" podID="2b0b25f1-8430-459d-9805-e667615dc073" containerID="c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23" exitCode=0 Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.510242 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerDied","Data":"c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.524816 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.540582 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.564838 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.600371 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.604750 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.604814 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.604835 4925 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.604862 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.604889 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.621858 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.647697 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.702580 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.708077 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.708154 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.708169 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.708194 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.708213 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.730029 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z 
is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.751541 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.768301 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.784076 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.798682 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.813749 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.813823 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.813836 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.813867 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.813878 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.815606 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.828974 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.847770 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:56Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.923567 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.923643 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.923656 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.923690 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.923713 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:56Z","lastTransitionTime":"2026-01-21T10:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:56 crc kubenswrapper[4925]: I0121 10:55:56.929337 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 09:26:33.01752711 +0000 UTC Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.026682 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.026728 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.026742 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.026762 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.026777 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.130143 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.130186 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.130195 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.130211 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.130223 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.232971 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.233031 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.233042 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.233061 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.233076 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.336900 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.336945 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.336954 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.336974 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.336984 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.439783 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.439854 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.439868 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.439893 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.439907 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.501760 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.501786 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.502019 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.501810 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.502126 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.502132 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.543202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.543250 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.543259 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.543275 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.543285 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.615127 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.615202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.615221 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.615247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.615265 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.636813 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:57Z is after 
2025-08-24T17:21:41Z" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.642388 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.642461 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.642473 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.642492 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.642504 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.661634 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:57Z is after 
2025-08-24T17:21:41Z" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.666791 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.666832 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.666852 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.666873 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.666888 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.684966 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:57Z is after 
2025-08-24T17:21:41Z" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.689417 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.689550 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.689676 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.689776 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.689870 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.704134 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:57Z is after 
2025-08-24T17:21:41Z" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.708770 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.708929 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.709025 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.709120 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.709211 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.728106 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:57Z is after 
2025-08-24T17:21:41Z" Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.728213 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.729785 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.729805 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.729813 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.729822 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.729830 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.807982 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.808232 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:56:13.808202739 +0000 UTC m=+65.412094693 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.808925 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.809077 4925 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.809360 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:13.809343827 +0000 UTC m=+65.413235761 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.810085 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.810208 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.810465 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:13.810452284 +0000 UTC m=+65.414344218 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.810809 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.811469 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.811019 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.812306 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.812530 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.812727 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:13.812705679 +0000 UTC m=+65.416597643 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.811606 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.813261 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.813705 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:57 crc kubenswrapper[4925]: E0121 10:55:57.814258 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:13.814226539 +0000 UTC m=+65.418118513 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.832709 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.832781 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.832800 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.832822 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.832835 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.929936 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 20:35:58.343694059 +0000 UTC Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.934882 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.934935 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.934947 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.934968 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:57 crc kubenswrapper[4925]: I0121 10:55:57.934983 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:57Z","lastTransitionTime":"2026-01-21T10:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.039024 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.039126 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.039155 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.039186 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.039215 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.141681 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.141725 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.141733 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.141754 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.141765 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.244335 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.244387 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.244433 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.244453 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.244468 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.255127 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.275433 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.289323 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.304724 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.323193 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.343598 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.352131 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.352172 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.352184 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.352202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.352214 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.366736 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.379628 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.394184 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.410504 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",
\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.423825 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.439535 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0
,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.454607 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.455190 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.455225 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.455238 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.455258 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.455270 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.469765 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.486266 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.500843 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:58Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.558988 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.559061 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.559085 4925 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.559106 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.559121 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.661924 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.661975 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.661985 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.662004 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.662014 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.765060 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.765116 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.765127 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.765141 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.765150 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.867971 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.868021 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.868031 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.868047 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.868057 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.931130 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 02:35:10.476222991 +0000 UTC Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.970839 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.970889 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.970901 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.970922 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:58 crc kubenswrapper[4925]: I0121 10:55:58.970934 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:58Z","lastTransitionTime":"2026-01-21T10:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.074178 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.074234 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.074247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.074265 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.074276 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:59Z","lastTransitionTime":"2026-01-21T10:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.114813 4925 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.200919 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.200992 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.201006 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.201024 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.201037 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:59Z","lastTransitionTime":"2026-01-21T10:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.303472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.303513 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.303525 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.303541 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.303552 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:59Z","lastTransitionTime":"2026-01-21T10:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.405798 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.405840 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.405850 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.405863 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.405874 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:59Z","lastTransitionTime":"2026-01-21T10:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.500834 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.500869 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:55:59 crc kubenswrapper[4925]: E0121 10:55:59.501330 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.500945 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:55:59 crc kubenswrapper[4925]: E0121 10:55:59.501499 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:55:59 crc kubenswrapper[4925]: E0121 10:55:59.501741 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.507898 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.507921 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.507930 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.507943 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.507953 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:59Z","lastTransitionTime":"2026-01-21T10:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.515525 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.526767 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.539771 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.556804 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",
\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.567951 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.581358 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0
,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.594735 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.605640 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.610286 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.610449 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.610543 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.610669 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.610747 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:59Z","lastTransitionTime":"2026-01-21T10:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.618889 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.630299 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.643122 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.654008 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.666728 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.687509 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.712283 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:55:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.815419 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.815730 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.815809 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.815876 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.815954 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:59Z","lastTransitionTime":"2026-01-21T10:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.918514 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.918585 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.918598 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.918623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.918638 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:55:59Z","lastTransitionTime":"2026-01-21T10:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:55:59 crc kubenswrapper[4925]: I0121 10:55:59.931947 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 09:47:27.065209456 +0000 UTC Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.034699 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.034747 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.034756 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.034781 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.034791 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:00Z","lastTransitionTime":"2026-01-21T10:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.140803 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.141141 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.141228 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.141307 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.141388 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:00Z","lastTransitionTime":"2026-01-21T10:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.244652 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.244886 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.245027 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.245169 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.245305 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:00Z","lastTransitionTime":"2026-01-21T10:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.409904 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.409955 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.409975 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.409993 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.410038 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:00Z","lastTransitionTime":"2026-01-21T10:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.514330 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.514373 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.514384 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.514413 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.514424 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:00Z","lastTransitionTime":"2026-01-21T10:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.527901 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49"} Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.530474 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.530545 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.531879 4925 generic.go:334] "Generic (PLEG): container finished" podID="2b0b25f1-8430-459d-9805-e667615dc073" containerID="0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158" exitCode=0 Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.531908 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerDied","Data":"0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158"} Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.551376 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.570888 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.590572 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.602468 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.615463 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.629504 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.644370 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.653581 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.662533 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.666851 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.667283 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.667295 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.667311 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.667322 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:00Z","lastTransitionTime":"2026-01-21T10:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.692453 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-2
1T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e94017
55b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.706277 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.724793 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.738929 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.769941 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.769996 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.770025 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.770043 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.770055 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:00Z","lastTransitionTime":"2026-01-21T10:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:00 crc kubenswrapper[4925]: I0121 10:56:00.771430 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e5
2f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.093599 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 03:13:52.180609139 +0000 UTC Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.100875 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.100938 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.100954 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.100973 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.100991 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:01Z","lastTransitionTime":"2026-01-21T10:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.115263 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.132857 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.154535 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5eb
aa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.170101 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.187092 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.199775 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\
"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.205224 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.205277 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.205294 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.205325 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.205338 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:01Z","lastTransitionTime":"2026-01-21T10:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.217999 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.237621 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.256544 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.275163 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.422498 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.422545 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.422558 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.422576 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.422591 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:01Z","lastTransitionTime":"2026-01-21T10:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.444712 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-2
1T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e94017
55b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.596542 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.596670 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:01 crc kubenswrapper[4925]: E0121 10:56:01.596719 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.596670 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:01 crc kubenswrapper[4925]: E0121 10:56:01.596897 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:01 crc kubenswrapper[4925]: E0121 10:56:01.597137 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.689747 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.689786 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.689794 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.689812 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.689822 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:01Z","lastTransitionTime":"2026-01-21T10:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.696185 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.766312 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.772525 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" 
event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerStarted","Data":"77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3"} Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.773327 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.793428 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.794508 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.794547 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.794559 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.794575 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.794587 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:01Z","lastTransitionTime":"2026-01-21T10:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.820747 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkub
e-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.833519 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.835334 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-cont
roller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.846965 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517
109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.861203 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.876450 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.889642 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.898040 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.898074 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.898084 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.898100 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.898111 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:01Z","lastTransitionTime":"2026-01-21T10:56:01Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.900684 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.918754 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.940716 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.953597 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:01 crc kubenswrapper[4925]: I0121 10:56:01.965542 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:01Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.012148 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.012184 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.012193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.012207 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.012219 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.021017 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.035917 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771a
ee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.050806 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.071019 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5eb
aa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.088228 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.094004 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 19:42:07.906277675 +0000 UTC Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.106556 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.115119 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.115154 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.115163 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.115177 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.115186 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.121975 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.220519 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.220586 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.220609 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.220632 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.220702 4925 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.323490 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.323521 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.323529 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.323567 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.323584 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.427342 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.427495 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.427515 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.427594 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.427621 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.453778 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"] Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.454562 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" Jan 21 10:56:02 crc kubenswrapper[4925]: W0121 10:56:02.458001 4925 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert": failed to list *v1.Secret: secrets "ovn-control-plane-metrics-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Jan 21 10:56:02 crc kubenswrapper[4925]: W0121 10:56:02.458036 4925 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd": failed to list *v1.Secret: secrets "ovn-kubernetes-control-plane-dockercfg-gs7dd" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Jan 21 10:56:02 crc kubenswrapper[4925]: E0121 10:56:02.458082 4925 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-control-plane-metrics-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 10:56:02 crc kubenswrapper[4925]: E0121 10:56:02.458099 4925 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-gs7dd\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-kubernetes-control-plane-dockercfg-gs7dd\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.485088 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.485088 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.520572 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tw4t\" (UniqueName: \"kubernetes.io/projected/a8599a6b-48cb-400d-ac34-86be75b9ce54-kube-api-access-9tw4t\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.521239 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a8599a6b-48cb-400d-ac34-86be75b9ce54-env-overrides\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.521475 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.521629 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.532507 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.532621 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.532652 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.532727 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.532751 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.622172 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.622229 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.622249 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tw4t\" (UniqueName: \"kubernetes.io/projected/a8599a6b-48cb-400d-ac34-86be75b9ce54-kube-api-access-9tw4t\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"
Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.622275 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a8599a6b-48cb-400d-ac34-86be75b9ce54-env-overrides\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.623332 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a8599a6b-48cb-400d-ac34-86be75b9ce54-env-overrides\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.623471 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.637814 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.638076 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.638256 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.638423 4925 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeNotReady" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.638578 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.870769 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.871111 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.871255 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.871348 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.871477 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.893105 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tw4t\" (UniqueName: \"kubernetes.io/projected/a8599a6b-48cb-400d-ac34-86be75b9ce54-kube-api-access-9tw4t\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.901539 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.928269 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.941583 4925 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.967040 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.974687 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.974722 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.974731 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.974746 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.974756 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:02Z","lastTransitionTime":"2026-01-21T10:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.980976 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:02 crc kubenswrapper[4925]: I0121 10:56:02.992738 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:02Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.007342 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.022223 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56
:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.032040 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.045686 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.057739 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.067907 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.077812 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.077870 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.077882 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.077902 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.077913 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:03Z","lastTransitionTime":"2026-01-21T10:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.079974 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.091248 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.094681 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 22:14:51.452828079 +0000 UTC Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.180303 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 
10:56:03.180349 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.180362 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.180377 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.180387 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:03Z","lastTransitionTime":"2026-01-21T10:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.282750 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.282820 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.282841 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.282867 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.282886 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:03Z","lastTransitionTime":"2026-01-21T10:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.386420 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.386492 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.386505 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.386526 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.386540 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:03Z","lastTransitionTime":"2026-01-21T10:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.489697 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.489772 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.489782 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.489808 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.489821 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:03Z","lastTransitionTime":"2026-01-21T10:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.501240 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.501284 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.501382 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:03 crc kubenswrapper[4925]: E0121 10:56:03.501465 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:03 crc kubenswrapper[4925]: E0121 10:56:03.501579 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:03 crc kubenswrapper[4925]: E0121 10:56:03.501756 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:03 crc kubenswrapper[4925]: E0121 10:56:03.749277 4925 secret.go:188] Couldn't get secret openshift-ovn-kubernetes/ovn-control-plane-metrics-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.749511 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.749669 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.749682 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.749700 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.749713 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:03Z","lastTransitionTime":"2026-01-21T10:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:03 crc kubenswrapper[4925]: E0121 10:56:03.749875 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovn-control-plane-metrics-cert podName:a8599a6b-48cb-400d-ac34-86be75b9ce54 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:04.249847593 +0000 UTC m=+55.853739537 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "ovn-control-plane-metrics-cert" (UniqueName: "kubernetes.io/secret/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovn-control-plane-metrics-cert") pod "ovnkube-control-plane-749d76644c-glmql" (UID: "a8599a6b-48cb-400d-ac34-86be75b9ce54") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.750415 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.835217 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-2txwq"] Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.836333 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:03 crc kubenswrapper[4925]: E0121 10:56:03.836523 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.849162 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.849576 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qwdn\" (UniqueName: \"kubernetes.io/projected/5c3596d1-1f08-4703-ab63-c29358aac0d9-kube-api-access-7qwdn\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.852742 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.852779 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.852794 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.852813 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.852826 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:03Z","lastTransitionTime":"2026-01-21T10:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:03 crc kubenswrapper[4925]: I0121 10:56:03.896224 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.914927 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.930139 4925 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.932950 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\
\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.946815 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.997219 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.997250 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.997259 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.997274 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:03.997294 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:03Z","lastTransitionTime":"2026-01-21T10:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.000165 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:
56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.013276 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.027936 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: E0121 10:56:04.042988 4925 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:04 crc kubenswrapper[4925]: E0121 10:56:04.043094 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs podName:5c3596d1-1f08-4703-ab63-c29358aac0d9 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:04.543055316 +0000 UTC m=+56.146947250 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs") pod "network-metrics-daemon-2txwq" (UID: "5c3596d1-1f08-4703-ab63-c29358aac0d9") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.043220 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.043256 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.043475 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qwdn\" (UniqueName: \"kubernetes.io/projected/5c3596d1-1f08-4703-ab63-c29358aac0d9-kube-api-access-7qwdn\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.055707 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.068110 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qwdn\" (UniqueName: \"kubernetes.io/projected/5c3596d1-1f08-4703-ab63-c29358aac0d9-kube-api-access-7qwdn\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.072348 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"
ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.083711 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.095570 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 06:28:25.830933642 +0000 UTC Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.099772 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.099836 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.099849 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.099866 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.100158 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:04Z","lastTransitionTime":"2026-01-21T10:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.107436 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e5
2f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.120666 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.162627 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.175375 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.187171 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.199661 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:04Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.202908 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.202945 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.202956 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.202973 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.202984 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:04Z","lastTransitionTime":"2026-01-21T10:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.306170 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.306240 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.306252 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.306273 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.306282 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:04Z","lastTransitionTime":"2026-01-21T10:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.345916 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.349591 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a8599a6b-48cb-400d-ac34-86be75b9ce54-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-glmql\" (UID: \"a8599a6b-48cb-400d-ac34-86be75b9ce54\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.408553 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.408624 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.408645 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.408707 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.408726 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:04Z","lastTransitionTime":"2026-01-21T10:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.512532 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.512599 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.512611 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.512637 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.512651 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:04Z","lastTransitionTime":"2026-01-21T10:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.549638 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:04 crc kubenswrapper[4925]: E0121 10:56:04.549903 4925 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:04 crc kubenswrapper[4925]: E0121 10:56:04.550031 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs podName:5c3596d1-1f08-4703-ab63-c29358aac0d9 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:05.550009361 +0000 UTC m=+57.153901295 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs") pod "network-metrics-daemon-2txwq" (UID: "5c3596d1-1f08-4703-ab63-c29358aac0d9") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.569692 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.615350 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.615443 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.615454 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.615474 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.615487 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:04Z","lastTransitionTime":"2026-01-21T10:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.719306 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.719351 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.719360 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.719382 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.719412 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:04Z","lastTransitionTime":"2026-01-21T10:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.822459 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.822587 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.822607 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.822675 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.822699 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:04Z","lastTransitionTime":"2026-01-21T10:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:04 crc kubenswrapper[4925]: I0121 10:56:04.880363 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" event={"ID":"a8599a6b-48cb-400d-ac34-86be75b9ce54","Type":"ContainerStarted","Data":"ecaa4b1cd416f4766855f1a3e8040904e1350e0d2c13756bd5e933ec911fd26a"} Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.077952 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.077987 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.077997 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.078011 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.078020 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.096225 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 14:55:02.896533614 +0000 UTC Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.181022 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.181494 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.181708 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.181816 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.181898 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.290136 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.290264 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.290277 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.290295 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.290307 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.407451 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.407834 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.407924 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.409368 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.409412 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.501011 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.501159 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.501032 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.501032 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:56:05 crc kubenswrapper[4925]: E0121 10:56:05.501301 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 10:56:05 crc kubenswrapper[4925]: E0121 10:56:05.501550 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 10:56:05 crc kubenswrapper[4925]: E0121 10:56:05.501752 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9"
Jan 21 10:56:05 crc kubenswrapper[4925]: E0121 10:56:05.501895 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.513221 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.513302 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.513320 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.513345 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.513364 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.608366 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq"
Jan 21 10:56:05 crc kubenswrapper[4925]: E0121 10:56:05.608555 4925 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 10:56:05 crc kubenswrapper[4925]: E0121 10:56:05.608647 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs podName:5c3596d1-1f08-4703-ab63-c29358aac0d9 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:07.608616772 +0000 UTC m=+59.212508706 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs") pod "network-metrics-daemon-2txwq" (UID: "5c3596d1-1f08-4703-ab63-c29358aac0d9") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.616101 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.616155 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.616168 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.616186 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.616198 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.719758 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.719825 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.719838 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.719862 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.719881 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.822909 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.822956 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.822969 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.822990 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.823004 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.926037 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.926092 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.926111 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.926130 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:05 crc kubenswrapper[4925]: I0121 10:56:05.926143 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:05Z","lastTransitionTime":"2026-01-21T10:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.028600 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.028673 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.028886 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.029048 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.029067 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.097915 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 02:51:50.809269507 +0000 UTC
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.131992 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.132065 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.132083 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.132112 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.132131 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.235182 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.235223 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.235233 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.235247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.235256 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.337481 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.337529 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.337541 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.337558 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.337575 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.440514 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.440574 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.440587 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.440608 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.440623 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.544069 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.544129 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.544145 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.544165 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.544180 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.647842 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.647883 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.647893 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.647907 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.647916 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.750825 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.750864 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.750876 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.750891 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.750901 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.889561 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.889615 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.889623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.889643 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.889653 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.992327 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.992385 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.992430 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.992455 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:06 crc kubenswrapper[4925]: I0121 10:56:06.992474 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:06Z","lastTransitionTime":"2026-01-21T10:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.095956 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.096012 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.096026 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.096044 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.096053 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.098052 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 15:36:35.307486433 +0000 UTC
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.198204 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.198247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.231824 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.231861 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.231876 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.339717 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.339754 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.339765 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.339781 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.339791 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.443067 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.443109 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.443118 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.443135 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.443146 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.503962 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.504145 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.504609 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.504656 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.504689 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.504744 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.504787 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.504845 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.546247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.546279 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.546288 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.546302 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.546314 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.639325 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.639811 4925 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.640146 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs podName:5c3596d1-1f08-4703-ab63-c29358aac0d9 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:11.639887058 +0000 UTC m=+63.243778992 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs") pod "network-metrics-daemon-2txwq" (UID: "5c3596d1-1f08-4703-ab63-c29358aac0d9") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.649622 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.649679 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.649909 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.649947 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.649964 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.753896 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.753956 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.753972 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.753995 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.754008 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.807612 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.807660 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.807679 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.807699 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.807711 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.890994 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:07Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.896782 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" 
event={"ID":"a8599a6b-48cb-400d-ac34-86be75b9ce54","Type":"ContainerStarted","Data":"7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f"} Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.898944 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.898986 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.898996 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.899015 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.899028 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.912786 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:07Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.918659 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.918722 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.918736 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.918755 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.918767 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:07 crc kubenswrapper[4925]: E0121 10:56:07.945837 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:07Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.953715 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.953766 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.953777 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.953794 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:07 crc kubenswrapper[4925]: I0121 10:56:07.953805 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:07Z","lastTransitionTime":"2026-01-21T10:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:08 crc kubenswrapper[4925]: E0121 10:56:08.066787 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:08Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.070205 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.070267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.070281 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.070304 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.070318 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:08Z","lastTransitionTime":"2026-01-21T10:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:08 crc kubenswrapper[4925]: E0121 10:56:08.084219 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:08Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:08 crc kubenswrapper[4925]: E0121 10:56:08.084549 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.086174 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.086233 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.086245 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.086267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.086280 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:08Z","lastTransitionTime":"2026-01-21T10:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.098432 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 11:12:42.323945333 +0000 UTC Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.191488 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.191531 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.191546 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.191571 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.191589 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:08Z","lastTransitionTime":"2026-01-21T10:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.324719 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.324745 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.324753 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.324767 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.324778 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:08Z","lastTransitionTime":"2026-01-21T10:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.592415 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.592483 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.592497 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.592519 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.592538 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:08Z","lastTransitionTime":"2026-01-21T10:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.695846 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.695899 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.695910 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.695927 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.695936 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:08Z","lastTransitionTime":"2026-01-21T10:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.799472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.799537 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.799549 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.799571 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.799587 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:08Z","lastTransitionTime":"2026-01-21T10:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.901433 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.901475 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.901487 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.901502 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:08 crc kubenswrapper[4925]: I0121 10:56:08.901511 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:08Z","lastTransitionTime":"2026-01-21T10:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.005075 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.005146 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.005163 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.005183 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.005217 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.098908 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 16:08:24.699278679 +0000 UTC Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.108220 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.108285 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.108301 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.108319 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.108351 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.212930 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.212971 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.212982 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.213000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.213013 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.316821 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.316871 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.316884 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.316902 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.316915 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.432606 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.433124 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.433135 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.433151 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.433161 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.501102 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.501146 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.501183 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.501256 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:09 crc kubenswrapper[4925]: E0121 10:56:09.501258 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:09 crc kubenswrapper[4925]: E0121 10:56:09.501337 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:09 crc kubenswrapper[4925]: E0121 10:56:09.501489 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:09 crc kubenswrapper[4925]: E0121 10:56:09.501600 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.550834 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.556042 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.556318 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.556434 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.556551 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.556641 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.589796 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.678898 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.679185 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.679270 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.679347 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.679436 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.703860 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753
fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use 
of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.716221 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.735915 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56
:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.759734 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.775134 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.782125 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.782179 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.782190 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.782206 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.782217 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.794210 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.812457 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.825329 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.841525 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.867032 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e5
2f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.881538 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.909522 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.925218 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.925268 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.925280 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.925302 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:09 crc kubenswrapper[4925]: I0121 10:56:09.925316 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:09Z","lastTransitionTime":"2026-01-21T10:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:09.930060 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:09.931901 4925 generic.go:334] "Generic (PLEG): container finished" podID="2b0b25f1-8430-459d-9805-e667615dc073" containerID="77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3" exitCode=0 Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:09.931977 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerDied","Data":"77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:09.936646 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" event={"ID":"a8599a6b-48cb-400d-ac34-86be75b9ce54","Type":"ContainerStarted","Data":"e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:09.965039 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:09.987974 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:09Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.015829 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc 
kubenswrapper[4925]: I0121 10:56:10.051995 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.052062 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.052076 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.052100 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.052112 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.053049 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 
10:56:10.071641 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.091210 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.099030 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 04:18:59.015216635 +0000 UTC Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.109746 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.128299 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.153612 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.156880 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 
21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.156916 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.156927 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.156944 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.156956 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.171420 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.194337 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sh
a256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a6731
4731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.208750 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.221623 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.238287 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.253909 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.259416 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.259464 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.259474 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.259492 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.259503 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.265767 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.282166 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.295455 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.313152 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:10Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.376376 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.376438 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.376449 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.376465 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.376477 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.479123 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.479169 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.479179 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.479197 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.479209 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.581979 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.582029 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.582044 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.582070 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.582085 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.685653 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.685778 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.685794 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.685825 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.685840 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.789110 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.789160 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.789170 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.789188 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.789203 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.892371 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.892471 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.892488 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.892519 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:10 crc kubenswrapper[4925]: I0121 10:56:10.892533 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:10Z","lastTransitionTime":"2026-01-21T10:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.022690 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.022765 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.022780 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.022799 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.022813 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.099818 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 09:08:50.968850709 +0000 UTC Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.125984 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.126062 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.126082 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.126114 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.126142 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.228709 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.228787 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.228825 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.228847 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.228860 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.332127 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.332214 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.332231 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.332257 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.332274 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.435698 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.436050 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.436162 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.436291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.436408 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.501300 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.501344 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.501431 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:11 crc kubenswrapper[4925]: E0121 10:56:11.501794 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.501458 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:11 crc kubenswrapper[4925]: E0121 10:56:11.501920 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:11 crc kubenswrapper[4925]: E0121 10:56:11.501774 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:11 crc kubenswrapper[4925]: E0121 10:56:11.502181 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.538960 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.539032 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.539044 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.539059 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.539071 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.641870 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:11 crc kubenswrapper[4925]: E0121 10:56:11.642201 4925 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:11 crc kubenswrapper[4925]: E0121 10:56:11.642366 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs podName:5c3596d1-1f08-4703-ab63-c29358aac0d9 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:19.642329802 +0000 UTC m=+71.246221766 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs") pod "network-metrics-daemon-2txwq" (UID: "5c3596d1-1f08-4703-ab63-c29358aac0d9") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.642581 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.642628 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.642679 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.642707 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.642725 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.745347 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.745418 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.745431 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.745445 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.745458 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.821643 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.834444 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.837621 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.848160 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.848205 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.848220 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.848239 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.848251 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.855844 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928
hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.868141 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.884596 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.900414 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.914182 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.925230 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.938658 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.945880 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/0.log" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.948805 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49" exitCode=1 Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.948876 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.950048 4925 scope.go:117] "RemoveContainer" containerID="ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.951244 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.951314 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.951332 4925 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.951359 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.951377 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:11Z","lastTransitionTime":"2026-01-21T10:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.957815 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\
\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.958098 4925 generic.go:334] "Generic (PLEG): container finished" podID="2b0b25f1-8430-459d-9805-e667615dc073" containerID="588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024" exitCode=0 Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.958230 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerDied","Data":"588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024"} Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.978585 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:11 crc kubenswrapper[4925]: I0121 10:56:11.996518 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:11Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.012586 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.035454 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.050167 4925 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.054533 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.054589 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.054600 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.054619 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.054630 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.072728 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.087524 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.098865 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.099968 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 08:16:29.85899579 +0000 UTC Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.120107 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.135933 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.151473 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.157987 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.158057 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.158070 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.158091 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.158102 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.172458 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.199822 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e5
2f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"message\\\":\\\" 6082 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:10.160649 6082 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 10:56:10.160727 6082 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 10:56:10.161692 6082 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:10.161770 6082 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:10.161825 6082 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:10.161826 6082 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 10:56:10.161896 6082 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:10.161933 6082 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 10:56:10.161961 6082 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 10:56:10.161947 6082 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:10.161999 6082 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 10:56:10.162067 6082 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:10.162079 6082 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 10:56:10.162121 6082 factory.go:656] Stopping watch factory\\\\nI0121 10:56:10.162180 6082 ovnkube.go:599] Stopped ovnkube\\\\nI0121 
10:56:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.220326 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.237260 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.249004 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.262877 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.262934 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.262948 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.262967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.262978 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.265880 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.277727 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.289449 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.302961 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.315520 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.330006 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.343954 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.359481 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.366573 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.366632 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.366647 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.366673 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.366688 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.374208 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.388645 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.469291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.469324 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.469332 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.469346 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.469355 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.572196 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.572243 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.572255 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.572273 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.572286 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.719618 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.719691 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.719706 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.720098 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.720135 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.824216 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.824279 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.824293 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.824346 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.824361 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.927621 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.927687 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.927699 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.927719 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.927732 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
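[Editor's note] Two failure signatures repeat through this stretch of the log: every pod status patch is rejected because the "pod.network-node-identity.openshift.io" webhook at https://127.0.0.1:9743 serves a certificate that expired 2025-08-24T17:21:41Z, and the node stays NotReady because no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/. A minimal node-side triage sketch for both checks follows. Assumptions not in the log itself: it runs on the node, Python 3 is available, and the third-party cryptography package (42+ for the *_utc accessors) is installed; the endpoint and directory are taken verbatim from the records above.

```python
# Triage sketch (assumptions: run on the node; `pip install cryptography>=42`).
import os
import ssl
from datetime import datetime, timezone

from cryptography import x509

WEBHOOK_ADDR = ("127.0.0.1", 9743)           # from the failed Post URL above
CNI_CONF_DIR = "/etc/kubernetes/cni/net.d/"  # from the NetworkReady message

# (a) Fetch the webhook's serving certificate WITHOUT verifying it, then
# compare its validity window to the current time -- the same check the
# kubelet's TLS client performs and reports as "certificate has expired".
pem = ssl.get_server_certificate(WEBHOOK_ADDR)
cert = x509.load_pem_x509_certificate(pem.encode())
now = datetime.now(timezone.utc)
print("webhook cert notBefore:", cert.not_valid_before_utc)
print("webhook cert notAfter: ", cert.not_valid_after_utc)
print("expired:", now > cert.not_valid_after_utc)

# (b) Check whether any CNI configuration has been written yet; the
# kubelet reports NetworkReady=false until a conf file appears here.
confs = []
if os.path.isdir(CNI_CONF_DIR):
    confs = [f for f in os.listdir(CNI_CONF_DIR)
             if f.endswith((".conf", ".conflist", ".json"))]
print("CNI conf files:", confs or "none (matches KubeletNotReady above)")
```

If both checks confirm what the records show, rotating the webhook's serving certificate should unblock the status patches, and NetworkReady should clear once the network plugin (here ovn-kubernetes, per the ovnkube-node records nearby) writes its configuration into that directory.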
Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.927732 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:12Z","lastTransitionTime":"2026-01-21T10:56:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.967465 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" event={"ID":"2b0b25f1-8430-459d-9805-e667615dc073","Type":"ContainerStarted","Data":"2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8"}
Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.970153 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/0.log"
Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.973669 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27"}
Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.974144 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g"
Jan 21 10:56:12 crc kubenswrapper[4925]: I0121 10:56:12.986261 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.002953 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:12Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.023202 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5eb
aa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.030203 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.030234 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.030242 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.030258 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.030268 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.039675 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.054047 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.072720 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.087543 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 
10:56:13.100916 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 18:31:54.273012233 +0000 UTC Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.102452 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.117306 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.133736 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.133848 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.133883 4925 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.133930 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.133955 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.138943 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.154625 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.167683 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.200889 4925 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastSta
te\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"message\\\":\\\" 6082 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:10.160649 6082 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 10:56:10.160727 6082 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 10:56:10.161692 6082 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:10.161770 6082 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:10.161825 6082 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:10.161826 6082 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 10:56:10.161896 6082 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:10.161933 6082 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 10:56:10.161961 6082 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 10:56:10.161947 6082 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:10.161999 6082 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 10:56:10.162067 6082 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:10.162079 6082 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 10:56:10.162121 6082 factory.go:656] Stopping watch factory\\\\nI0121 10:56:10.162180 6082 ovnkube.go:599] Stopped ovnkube\\\\nI0121 
10:56:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.213266 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.236857 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.236913 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.236924 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.236943 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.236958 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.240071 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.254040 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.269064 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.284311 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.305263 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.321140 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.340113 4925 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.340196 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.340210 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.340232 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.340247 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.341706 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.355791 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.378290 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.399535 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.416371 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 
10:56:13.434127 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.442905 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.442980 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.443007 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.443033 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.443047 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.456990 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.473717 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.547964 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.560370 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.571544 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.571690 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.571796 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.571872 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.571915 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.572047 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.572112 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.572035 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.573509 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.573576 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.573586 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.573609 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.573626 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.576894 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.606668 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.618243 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.628723 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.650780 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.668054 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"message\\\":\\\" 6082 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:10.160649 6082 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 10:56:10.160727 6082 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 10:56:10.161692 6082 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:10.161770 6082 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:10.161825 6082 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:10.161826 6082 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 10:56:10.161896 6082 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:10.161933 6082 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 10:56:10.161961 6082 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 10:56:10.161947 6082 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:10.161999 6082 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 10:56:10.162067 6082 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:10.162079 6082 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 10:56:10.162121 6082 factory.go:656] Stopping watch factory\\\\nI0121 10:56:10.162180 6082 ovnkube.go:599] Stopped ovnkube\\\\nI0121 
10:56:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.676082 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.676251 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.676346 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.676504 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.676617 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.779080 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.779354 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.779464 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.779670 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.779763 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.874765 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.874926 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:56:45.87490676 +0000 UTC m=+97.478798694 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.875380 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.875620 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.875752 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.875993 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.875487 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.876235 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:45.876224444 +0000 UTC m=+97.480116378 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.875720 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.876440 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.876555 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.876675 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:45.876661048 +0000 UTC m=+97.480552982 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.875896 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.876849 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.876932 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.876111 4925 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.877067 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:45.877054161 +0000 UTC m=+97.480946095 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:56:13 crc kubenswrapper[4925]: E0121 10:56:13.877185 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:45.877169035 +0000 UTC m=+97.481060969 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.882259 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.882411 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.882497 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.882612 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:13 crc kubenswrapper[4925]: I0121 10:56:13.882705 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:13Z","lastTransitionTime":"2026-01-21T10:56:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.013415 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.013479 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.013493 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.013519 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.013534 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.102089 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 15:43:21.017247016 +0000 UTC Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.116189 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.116316 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.116429 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.116510 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.116586 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.219691 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.219732 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.219778 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.219796 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.219811 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.322235 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.322539 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.322623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.322711 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.322873 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.426318 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.426362 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.426372 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.426412 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.426422 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.528645 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.528676 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.528686 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.528701 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.528711 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.631112 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.631153 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.631165 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.631182 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.631194 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.734894 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.734967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.734981 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.735000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.735011 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.837983 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.838036 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.838048 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.838065 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.838076 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.941056 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.941091 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.941100 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.941114 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.941123 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:14Z","lastTransitionTime":"2026-01-21T10:56:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.981628 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/1.log" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.982296 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/0.log" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.984508 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27" exitCode=1 Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.984541 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27"} Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.984578 4925 scope.go:117] "RemoveContainer" containerID="ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49" Jan 21 10:56:14 crc kubenswrapper[4925]: I0121 10:56:14.986612 4925 scope.go:117] "RemoveContainer" containerID="ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27" Jan 21 10:56:14 crc kubenswrapper[4925]: E0121 10:56:14.986859 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.027240 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.041236 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.043615 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.043658 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.043672 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.043689 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.043704 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.061662 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.078375 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.102265 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"message\\\":\\\" 6082 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:10.160649 6082 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 10:56:10.160727 6082 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 10:56:10.161692 6082 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:10.161770 6082 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:10.161825 6082 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:10.161826 6082 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 10:56:10.161896 6082 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:10.161933 6082 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 10:56:10.161961 6082 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 10:56:10.161947 6082 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:10.161999 6082 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 10:56:10.162067 6082 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:10.162079 6082 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 10:56:10.162121 6082 factory.go:656] Stopping watch factory\\\\nI0121 10:56:10.162180 6082 ovnkube.go:599] Stopped ovnkube\\\\nI0121 
10:56:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:14Z\\\",\\\"message\\\":\\\"ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0121 10:56:13.455275 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 
2025\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.102662 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 09:56:47.395142565 +0000 UTC Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.120016 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 
10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.135852 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.147114 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.147154 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.147167 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.147186 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.147199 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.152947 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720
243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.173270 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.195587 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.216100 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.237704 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 
10:56:15.250820 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.250866 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.250879 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.250897 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.250910 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.258092 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use 
of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.278898 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.297997 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.315979 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.333204 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.348440 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:15Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.353562 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.353593 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.353602 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.353617 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.353629 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.456245 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.456290 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.456304 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.456320 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.456333 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.500978 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.501051 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:15 crc kubenswrapper[4925]: E0121 10:56:15.501203 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:15 crc kubenswrapper[4925]: E0121 10:56:15.501190 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.501387 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:15 crc kubenswrapper[4925]: E0121 10:56:15.501786 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.501959 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:15 crc kubenswrapper[4925]: E0121 10:56:15.502054 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.558930 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.558980 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.558993 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.559011 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.559025 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.662201 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.662292 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.662303 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.662321 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.662332 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.764994 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.765038 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.765051 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.765069 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.765081 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.867445 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.867491 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.867501 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.867516 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.867526 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.969675 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.969715 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.969723 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.969741 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.969754 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:15Z","lastTransitionTime":"2026-01-21T10:56:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:15 crc kubenswrapper[4925]: I0121 10:56:15.990216 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/1.log" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.071729 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.071766 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.071774 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.071791 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.071802 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.103829 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 16:05:24.621679179 +0000 UTC Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.174240 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.174290 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.174309 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.174327 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.174337 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.276927 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.276963 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.276973 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.277000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.277014 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.379107 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.379164 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.379180 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.379202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.379214 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.481832 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.481879 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.481893 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.481911 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.481925 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.584307 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.584345 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.584354 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.584370 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.584380 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.687166 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.687213 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.687223 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.687238 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.687250 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.789643 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.789892 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.789966 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.790075 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.790143 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.893360 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.894959 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.895370 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.895665 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.895895 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.999131 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.999190 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.999202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.999222 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:16 crc kubenswrapper[4925]: I0121 10:56:16.999236 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:16Z","lastTransitionTime":"2026-01-21T10:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.101434 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.101689 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.101754 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.101823 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.101886 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.104603 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 05:28:08.441466566 +0000 UTC Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.205562 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.205602 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.205611 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.205625 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.205637 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.308259 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.309000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.309069 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.309176 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.309253 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.411869 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.412154 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.412322 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.412438 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.412546 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.501518 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.501591 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.501721 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:17 crc kubenswrapper[4925]: E0121 10:56:17.501855 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:17 crc kubenswrapper[4925]: E0121 10:56:17.501992 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:17 crc kubenswrapper[4925]: E0121 10:56:17.502081 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.502609 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:17 crc kubenswrapper[4925]: E0121 10:56:17.503025 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.516570 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.516634 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.516650 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.516669 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.516683 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.619986 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.620312 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.620477 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.620571 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.620664 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.722927 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.722990 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.723004 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.723024 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.723039 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.825689 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.825738 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.825750 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.825769 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.825781 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.929301 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.929361 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.929370 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.929423 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:17 crc kubenswrapper[4925]: I0121 10:56:17.929436 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:17Z","lastTransitionTime":"2026-01-21T10:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.032276 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.032331 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.032342 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.032367 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.032385 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.105311 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 22:33:21.190252217 +0000 UTC Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.135054 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.135105 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.135119 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.135141 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.135154 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.177917 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.177958 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.177970 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.178010 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.178022 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: E0121 10:56:18.193131 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:18Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.198469 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.198501 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.198514 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.198531 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.198542 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: E0121 10:56:18.209477 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:18Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.213171 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.213215 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.213225 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.213241 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.213253 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: E0121 10:56:18.226758 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:18Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.230852 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.231609 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.231631 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.231659 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.231687 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: E0121 10:56:18.248766 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:18Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.254103 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.254228 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.254240 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.254260 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.254270 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: E0121 10:56:18.266844 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:18Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:18 crc kubenswrapper[4925]: E0121 10:56:18.266977 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.268859 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.268888 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.268899 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.268915 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.268926 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.371750 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.371809 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.371820 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.371835 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.371845 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.474490 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.474530 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.474542 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.474562 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.474575 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.577902 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.578296 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.578434 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.578566 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.578683 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.682111 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.682739 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.682837 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.682938 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.683062 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.786420 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.786770 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.786866 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.786973 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.787075 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.889855 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.889898 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.889923 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.889942 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.889953 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.993119 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.993182 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.993198 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.993217 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:18 crc kubenswrapper[4925]: I0121 10:56:18.993229 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:18Z","lastTransitionTime":"2026-01-21T10:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.096842 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.096888 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.096900 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.096915 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.096927 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.105419 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 14:03:42.590161999 +0000 UTC Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.200007 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.200051 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.200063 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.200079 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.200091 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.303118 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.303168 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.303177 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.303200 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.303212 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.405736 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.405820 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.405835 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.405855 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.405869 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.501672 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.501818 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.501869 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.501996 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:19 crc kubenswrapper[4925]: E0121 10:56:19.502004 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:19 crc kubenswrapper[4925]: E0121 10:56:19.502172 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:19 crc kubenswrapper[4925]: E0121 10:56:19.502293 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:19 crc kubenswrapper[4925]: E0121 10:56:19.502443 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.509284 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.509323 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.509332 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.509349 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.509359 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.517986 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.533844 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.552418 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.573577 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.588558 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 
10:56:19.604917 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.612689 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.612737 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.612751 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.612770 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.612785 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.621209 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.640581 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.643373 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:19 crc kubenswrapper[4925]: E0121 10:56:19.643710 4925 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:19 crc kubenswrapper[4925]: E0121 10:56:19.643830 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs podName:5c3596d1-1f08-4703-ab63-c29358aac0d9 nodeName:}" failed. No retries permitted until 2026-01-21 10:56:35.643799977 +0000 UTC m=+87.247691911 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs") pod "network-metrics-daemon-2txwq" (UID: "5c3596d1-1f08-4703-ab63-c29358aac0d9") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.656406 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.673106 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.722473 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.722780 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.722856 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.722950 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.723126 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.741700 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\
\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.761130 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.781587 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.800844 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.826155 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.826217 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.826228 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.826249 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.826263 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.834770 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"message\\\":\\\" 6082 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:10.160649 6082 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 10:56:10.160727 6082 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 10:56:10.161692 6082 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:10.161770 6082 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:10.161825 6082 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:10.161826 6082 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 10:56:10.161896 6082 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:10.161933 6082 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 10:56:10.161961 6082 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 10:56:10.161947 6082 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:10.161999 6082 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 10:56:10.162067 6082 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:10.162079 6082 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 10:56:10.162121 6082 factory.go:656] Stopping watch factory\\\\nI0121 10:56:10.162180 6082 ovnkube.go:599] Stopped ovnkube\\\\nI0121 
10:56:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:14Z\\\",\\\"message\\\":\\\"ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0121 10:56:13.455275 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 
2025\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.854865 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.874306 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.893248 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:19Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930162 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930230 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930243 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930268 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930282 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
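[editorial note] Every status patch above fails the same way: before persisting pod status, the API server calls the pod.network-node-identity.openshift.io validating webhook at https://127.0.0.1:9743, whose serving certificate expired 2025-08-24T17:21:41Z, almost five months before the node's clock (2026-01-21). To confirm what a TLS endpoint is actually serving, fetch its certificate without verification and read the validity window; a sketch (host/port taken from the log; requires the third-party cryptography package, version 42+ for the *_utc accessors):

    import socket, ssl
    from datetime import datetime, timezone
    from cryptography import x509  # pip install cryptography

    def peer_cert_window(host="127.0.0.1", port=9743):
        # Grab the peer certificate even though it is expired: disable verification.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        with socket.create_connection((host, port), timeout=5) as sock:
            with ctx.wrap_socket(sock, server_hostname=host) as tls:
                der = tls.getpeercert(binary_form=True)
        cert = x509.load_der_x509_certificate(der)
        return cert.not_valid_before_utc, cert.not_valid_after_utc

    nbf, naf = peer_cert_window()
    now = datetime.now(timezone.utc)
    print(f"valid {nbf} .. {naf}; expired={now > naf}")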
Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930162 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930230 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930243 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930268 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:19 crc kubenswrapper[4925]: I0121 10:56:19.930282 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:19Z","lastTransitionTime":"2026-01-21T10:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.033188 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.033720 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.033819 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.033917 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.034003 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.204241 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 18:45:19.469588719 +0000 UTC
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.207331 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.207405 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.207419 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.207440 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.207452 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
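[editorial note] The kubelet-serving certificate itself is healthy (it runs to 2026-02-24), but certificate_manager logs a different rotation deadline on every pass (2025-11-13 here; 2026-01-10, 2025-11-24 and 2025-12-22 further down): the deadline is re-drawn with random jitter inside the certificate's lifetime each time rotation is evaluated, so a fleet of nodes does not rotate in lockstep. A toy model of that behaviour (the 70-100% window approximates client-go's jitteryDuration; the exact constants and the issue time used here are assumptions, not values from the log):

    import random
    from datetime import datetime, timedelta

    def jittered_deadline(not_before, not_after, lo=0.7, hi=1.0):
        # Rotate at a uniformly random point in [lo, hi] of the cert lifetime.
        lifetime = (not_after - not_before).total_seconds()
        return not_before + timedelta(seconds=lifetime * random.uniform(lo, hi))

    nbf = datetime(2025, 8, 26, 5, 53, 3)   # assumed issue time (not in the log)
    naf = datetime(2026, 2, 24, 5, 53, 3)   # expiry reported in the log
    for _ in range(4):                      # one fresh draw per evaluation pass
        print(jittered_deadline(nbf, naf))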
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.311561 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.311669 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.311682 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.311700 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.311713 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.413914 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.413949 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.413958 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.413971 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.413982 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.517225 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.517279 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.517292 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.517310 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.517322 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.621090 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.621495 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.621648 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.621757 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.621869 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.724833 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.724909 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.724927 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.724952 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.724967 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.859261 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.859352 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.859382 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.859441 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.859458 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.963795 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.963860 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.963873 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.963898 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:20 crc kubenswrapper[4925]: I0121 10:56:20.963915 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:20Z","lastTransitionTime":"2026-01-21T10:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.067167 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.067259 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.067275 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.067315 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.067339 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.171094 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.171181 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.171193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.171236 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.171250 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.204521 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 07:02:00.673961268 +0000 UTC
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.274440 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.274499 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.274510 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.274530 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.274545 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.377823 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.378711 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.378778 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.378812 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.378830 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.533589 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.533587 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.533785 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.533828 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:56:21 crc kubenswrapper[4925]: E0121 10:56:21.534065 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9"
Jan 21 10:56:21 crc kubenswrapper[4925]: E0121 10:56:21.534198 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 10:56:21 crc kubenswrapper[4925]: E0121 10:56:21.534459 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 10:56:21 crc kubenswrapper[4925]: E0121 10:56:21.534657 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.535325 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.535373 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.535382 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.535415 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.535426 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.639051 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.639141 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.639170 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.639200 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.639214 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.743065 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.743158 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.743174 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.743200 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.743216 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.848124 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.848192 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.848204 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.848247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.848263 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
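[editorial note] All of the NotReady churn in this window reduces to one predicate: the network plugin (OVN-Kubernetes, whose own pods are stuck in the webhook failures above) has not yet written a network config into /etc/kubernetes/cni/net.d/, so the runtime reports NetworkReady=false on every sync. What that readiness gate amounts to can be sketched as follows (an illustration of the check, not kubelet's actual code; the path comes from the log, the extension list follows common CNI conventions):

    from pathlib import Path

    CNI_EXTS = {".conf", ".conflist", ".json"}  # config types CNI runtimes accept

    def cni_configured(net_d="/etc/kubernetes/cni/net.d/"):
        # NetworkReady flips to true once any CNI config file shows up here.
        d = Path(net_d)
        return d.is_dir() and any(p.suffix in CNI_EXTS for p in d.iterdir())

    if not cni_configured():
        print("NetworkReady=false: no CNI configuration file yet "
              "(has your network provider started?)")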
Has your network provider started?"} Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.952150 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.952417 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.952438 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.952463 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:21 crc kubenswrapper[4925]: I0121 10:56:21.952475 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:21Z","lastTransitionTime":"2026-01-21T10:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.056268 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.056332 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.056342 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.056380 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.056418 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.159604 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.159664 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.159678 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.159702 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.159718 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.205652 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 17:55:58.380808695 +0000 UTC Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.263941 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.264002 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.264016 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.264041 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.264060 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.367522 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.367580 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.367592 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.367613 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.367626 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.471621 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.471676 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.471687 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.471706 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.471720 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.575651 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.575734 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.575747 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.575771 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.575789 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.679481 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.679530 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.679544 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.679567 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.679579 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.783718 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.783781 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.783796 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.783818 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.783831 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.887634 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.887696 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.887712 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.887732 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.887743 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.991783 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.991848 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.991860 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.991880 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:22 crc kubenswrapper[4925]: I0121 10:56:22.991892 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:22Z","lastTransitionTime":"2026-01-21T10:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.096054 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.096140 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.096164 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.096191 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.096203 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.199944 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.200025 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.200045 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.200070 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.200087 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.206461 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 04:36:31.353493273 +0000 UTC Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.303638 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.303754 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.303771 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.303806 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.303823 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.410851 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.411446 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.411666 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.411841 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.411970 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.501304 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.501428 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:23 crc kubenswrapper[4925]: E0121 10:56:23.501609 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.501864 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.501895 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:23 crc kubenswrapper[4925]: E0121 10:56:23.501947 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:23 crc kubenswrapper[4925]: E0121 10:56:23.502095 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:23 crc kubenswrapper[4925]: E0121 10:56:23.503126 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.514837 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.514901 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.514913 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.514928 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.514938 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.618189 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.618240 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.618252 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.618272 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.618285 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.722243 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.722307 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.722359 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.722414 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.722430 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.829187 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.829250 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.829265 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.829290 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.829313 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.933871 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.933934 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.933943 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.933960 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:23 crc kubenswrapper[4925]: I0121 10:56:23.933971 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:23Z","lastTransitionTime":"2026-01-21T10:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.039665 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.039741 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.039757 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.039780 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.039796 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.143716 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.143793 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.143841 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.143862 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.143875 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.207060 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 13:08:25.003379184 +0000 UTC Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.248524 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.248602 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.248618 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.248657 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.248674 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.354576 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.354658 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.354675 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.354707 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.354728 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.460977 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.461050 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.461061 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.461082 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.461097 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.565418 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.565516 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.565532 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.565574 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.565594 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.670062 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.670124 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.670139 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.670157 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.670172 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.774368 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.774451 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.774463 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.774531 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.774549 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.878498 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.878552 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.878564 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.878583 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.878598 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.981811 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.981920 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.981943 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.981967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:24 crc kubenswrapper[4925]: I0121 10:56:24.981981 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:24Z","lastTransitionTime":"2026-01-21T10:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.085437 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.085509 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.085522 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.085549 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.085565 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.188536 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.188611 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.188625 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.188644 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.188657 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.208751 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-06 22:31:01.596655901 +0000 UTC Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.292530 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.292656 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.292676 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.292712 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.292730 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.396340 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.396427 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.396440 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.396459 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.396474 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.529175 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.529349 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.529128 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:25 crc kubenswrapper[4925]: E0121 10:56:25.529589 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.529700 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:25 crc kubenswrapper[4925]: E0121 10:56:25.529834 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:25 crc kubenswrapper[4925]: E0121 10:56:25.530040 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:25 crc kubenswrapper[4925]: E0121 10:56:25.530306 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.532095 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.532134 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.532146 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.532169 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.532182 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.635796 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.635866 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.635880 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.635898 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.635910 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.739896 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.740023 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.740036 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.740065 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.740079 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.843138 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.843193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.843202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.843221 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.843237 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.946860 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.946913 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.946933 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.946956 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:25 crc kubenswrapper[4925]: I0121 10:56:25.946970 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:25Z","lastTransitionTime":"2026-01-21T10:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.049697 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.049771 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.049792 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.049821 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.049854 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.153737 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.153809 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.153830 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.153856 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.153874 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.209010 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 21:20:36.121503293 +0000 UTC Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.258470 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.258556 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.258571 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.258598 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.258614 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.362173 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.362228 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.362240 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.362261 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.362277 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.465597 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.465650 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.465663 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.465678 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.465688 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.569567 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.569664 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.569679 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.569699 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.569712 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.672522 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.672587 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.672598 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.672618 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.672635 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.777112 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.777218 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.777236 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.777261 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.777276 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.881909 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.881948 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.881960 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.881978 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.881993 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.985215 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.985347 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.985359 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.985383 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:26 crc kubenswrapper[4925]: I0121 10:56:26.985433 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:26Z","lastTransitionTime":"2026-01-21T10:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.089971 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.090049 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.090067 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.090097 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.090114 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.194431 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.194483 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.194495 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.194515 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.194528 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.209857 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 10:02:20.906670376 +0000 UTC Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.298198 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.298260 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.298276 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.298298 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.298310 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.403139 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.403664 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.403828 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.403957 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.404063 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.501491 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:27 crc kubenswrapper[4925]: E0121 10:56:27.501728 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.501491 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:27 crc kubenswrapper[4925]: E0121 10:56:27.501835 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.501520 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:27 crc kubenswrapper[4925]: E0121 10:56:27.501901 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.501491 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:27 crc kubenswrapper[4925]: E0121 10:56:27.501960 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.508031 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.508104 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.508116 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.508136 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.508148 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.611083 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.611673 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.611807 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.611926 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.612056 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.715345 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.715428 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.715444 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.715466 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.715484 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.820803 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.820876 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.820889 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.820916 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.820933 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.926071 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.926154 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.926177 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.926206 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:27 crc kubenswrapper[4925]: I0121 10:56:27.926224 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:27Z","lastTransitionTime":"2026-01-21T10:56:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.030499 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.030569 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.030594 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.030621 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.030652 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.135092 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.135151 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.135171 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.135193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.135209 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
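The NodeNotReady blocks above recur on a steady tick. A short sketch over the block timestamps exactly as logged; the gaps come out just over 100 ms:

#!/usr/bin/env python3
# Measure the cadence of the repeated NodeNotReady condition blocks,
# using the setters.go:603 timestamps copied from the log above.
from datetime import datetime

stamps = ["10:56:27.508148", "10:56:27.612056", "10:56:27.715484",
          "10:56:27.820933", "10:56:27.926224", "10:56:28.030652",
          "10:56:28.135209"]
times = [datetime.strptime(s, "%H:%M:%S.%f") for s in stamps]
for a, b in zip(times, times[1:]):
    print(b - a)   # each gap is roughly 0.103-0.106 s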
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.211592 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 22:47:48.127939816 +0000 UTC
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.240034 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.240095 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.240106 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.240127 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.240143 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.343856 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.343912 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.343928 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.343947 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.343961 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.447979 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.448070 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.448083 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.448106 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.448122 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.551905 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.552053 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.552096 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.552129 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.552149 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.562474 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.562551 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.562563 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.562591 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.562606 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:28 crc kubenswrapper[4925]: E0121 10:56:28.582774 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:28Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.590304 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.590389 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.590439 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.590469 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.590496 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:28 crc kubenswrapper[4925]: E0121 10:56:28.610566 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:28Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.616204 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.616254 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
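The patch failure pins both the endpoint (https://127.0.0.1:9743) and the cause (an expired serving certificate). A minimal probe sketch, assuming it runs on the node itself and that the cryptography package (version 42 or later for the *_utc attributes) is installed; verification is disabled deliberately, since the certificate is the thing under inspection:

#!/usr/bin/env python3
# Fetch the certificate served by the webhook endpoint named in the error
# above and print its validity window.
import socket, ssl
from cryptography import x509

ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE   # we want the cert even though it is expired

with socket.create_connection(("127.0.0.1", 9743), timeout=5) as sock:
    with ctx.wrap_socket(sock, server_hostname="127.0.0.1") as tls:
        der = tls.getpeercert(binary_form=True)

cert = x509.load_der_x509_certificate(der)
print("not before:", cert.not_valid_before_utc)
print("not after: ", cert.not_valid_after_utc)   # expect 2025-08-24 17:21:41 UTC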
event="NodeHasNoDiskPressure" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.616264 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.616283 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.616300 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:28 crc kubenswrapper[4925]: E0121 10:56:28.633269 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:28Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.641960 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.642029 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
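The rejected patch in the 10:56:28.582774 record is ordinary JSON behind two layers of backslash quoting (\" for the err field, \\\" for the patch string inside it). A sketch that undoes the escaping and summarizes the payload, assuming the log text is saved locally as kubelet.log (hypothetical path):

#!/usr/bin/env python3
# Recover and summarize the node-status patch embedded in the
# "Error updating node status" record above.
import json, re

raw = open("kubelet.log").read()            # hypothetical local copy of this log
m = re.search(r'failed to patch status \\"(\{.*?\})\\" for node', raw, re.S)
patch = json.loads(m.group(1).replace(r'\\\"', '"'))   # undo \\\" -> "
status = patch["status"]
print("capacity:   ", status["capacity"])
print("allocatable:", status["allocatable"])
print("images:     ", len(status["images"]), "entries")
for cond in status["conditions"]:
    print(cond["type"], cond["status"], cond.get("reason", ""))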
event="NodeHasNoDiskPressure" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.642043 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.642068 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.642083 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:28 crc kubenswrapper[4925]: E0121 10:56:28.662108 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:28Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.667918 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.667973 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.667986 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.668006 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.668021 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:28 crc kubenswrapper[4925]: E0121 10:56:28.802699 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:28Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:28 crc kubenswrapper[4925]: E0121 10:56:28.802924 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.809085 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.809191 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.809219 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.809245 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.809262 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.913949 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.914034 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.914049 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.914073 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:28 crc kubenswrapper[4925]: I0121 10:56:28.914092 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:28Z","lastTransitionTime":"2026-01-21T10:56:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.018168 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.018228 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.018245 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.018279 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.018305 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.122154 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.122224 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.122234 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.122253 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.122266 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.212638 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 16:27:50.736148984 +0000 UTC Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.226374 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.226527 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.226545 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.226568 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.226583 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.331558 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.331616 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.331635 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.331657 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.331677 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.436719 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.436853 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.436871 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.436905 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.436923 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.501640 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:29 crc kubenswrapper[4925]: E0121 10:56:29.501933 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.502763 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.502836 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:29 crc kubenswrapper[4925]: E0121 10:56:29.502877 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.502736 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:29 crc kubenswrapper[4925]: E0121 10:56:29.503041 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.503372 4925 scope.go:117] "RemoveContainer" containerID="ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27" Jan 21 10:56:29 crc kubenswrapper[4925]: E0121 10:56:29.503470 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.526935 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.546112 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.546176 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.546190 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.546211 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.546228 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.547566 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.566941 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.595093 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca6224270ff736309134a147ddf6da89adef74e52f60ec97f74169d8aa310a49\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"message\\\":\\\" 6082 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:10.160649 6082 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 10:56:10.160727 6082 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 10:56:10.161692 6082 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:10.161770 6082 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:10.161825 6082 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:10.161826 6082 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 10:56:10.161896 6082 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:10.161933 6082 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 10:56:10.161961 6082 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 10:56:10.161947 6082 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:10.161999 6082 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 10:56:10.162067 6082 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:10.162079 6082 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 10:56:10.162121 6082 factory.go:656] Stopping watch factory\\\\nI0121 10:56:10.162180 6082 ovnkube.go:599] Stopped ovnkube\\\\nI0121 
10:56:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:14Z\\\",\\\"message\\\":\\\"ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0121 10:56:13.455275 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 
2025\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.615370 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.641544 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.651313 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.651377 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.651410 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.651438 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.651454 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.693965 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.738828 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.754836 4925 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.754903 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.754914 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.754932 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.754945 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.767365 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.811283 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.838358 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 
10:56:29.858643 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.859265 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.859299 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.859331 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.859347 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.863080 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use 
of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.881924 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.902759 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.921261 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.942222 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.963749 4925 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.963805 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.963814 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.963832 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.963846 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:29Z","lastTransitionTime":"2026-01-21T10:56:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.963852 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:29 crc kubenswrapper[4925]: I0121 10:56:29.986087 4925 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:29Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.010204 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.043150 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:14Z\\\",\\\"message\\\":\\\"ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0121 10:56:13.455275 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 
2025\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiv
eReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.064025 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 
10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.066553 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.066610 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.066623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.066647 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.066660 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.073203 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/1.log" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.077233 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba"} Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.099500 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e
517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.123675 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.243307 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 09:45:55.40388908 +0000 UTC Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.252782 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.252853 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.252868 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.252896 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.252910 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.254103 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.281384 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.302076 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.320488 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Di
sabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.345807 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-api
server-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.357373 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.357468 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.357482 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.357508 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.357524 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.365692 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.390462 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.419726 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.444529 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.461325 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.461383 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.461417 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.461441 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.461465 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.472569 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.499552 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.523068 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.546030 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:30Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.567016 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.567085 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.567100 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.567123 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.567142 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.670078 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.670127 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.670138 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.670156 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.670172 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.773906 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.773951 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.773966 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.773984 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.773996 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.877234 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.877291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.877303 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.877323 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.877336 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.980967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.981038 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.981050 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.981071 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:30 crc kubenswrapper[4925]: I0121 10:56:30.981085 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:30Z","lastTransitionTime":"2026-01-21T10:56:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.089220 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.090805 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.090842 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.090857 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.090877 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.090892 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.106986 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.131481 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.152331 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.172918 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.195558 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.195673 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.195702 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.195749 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.195784 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.200029 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.224853 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.245094 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 19:41:34.749114855 +0000 UTC
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.259648 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.284689 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.303514 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.304297 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.304364 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.304387 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.304432 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.304445 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.331650 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.347388 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.365523 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.384526 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.408133 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.408204 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.408220 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.408246 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.408264 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.414312 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.431188 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.450088 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.471813 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.493866 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:14Z\\\",\\\"message\\\":\\\"ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0121 10:56:13.455275 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 
2025\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"c
ontainerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:31Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.500900 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.501048 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.500939 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.501097 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:31 crc kubenswrapper[4925]: E0121 10:56:31.501090 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:31 crc kubenswrapper[4925]: E0121 10:56:31.501289 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:31 crc kubenswrapper[4925]: E0121 10:56:31.501752 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:31 crc kubenswrapper[4925]: E0121 10:56:31.501854 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.510952 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.511013 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.511025 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.511056 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.511072 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.613926 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.613989 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.614006 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.614033 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.614047 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.717902 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.717973 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.717983 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.718004 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.718016 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.820160 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.820211 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.820223 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.820238 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.820251 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.923125 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.923188 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.923197 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.923231 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:31 crc kubenswrapper[4925]: I0121 10:56:31.923240 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:31Z","lastTransitionTime":"2026-01-21T10:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.027003 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.027051 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.027060 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.027078 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.027089 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.095346 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/2.log" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.096480 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/1.log" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.100999 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba" exitCode=1 Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.101091 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.101634 4925 scope.go:117] "RemoveContainer" containerID="ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.103000 4925 scope.go:117] "RemoveContainer" containerID="61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba" Jan 21 10:56:32 crc kubenswrapper[4925]: E0121 10:56:32.103483 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.123786 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.136327 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.136386 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.136420 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.136439 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.136452 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.154793 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.169097 4925 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.187804 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.202913 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.217569 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.231908 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.239569 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.239614 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.239623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.239636 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.239645 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.246169 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 20:04:11.409699707 +0000 UTC Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.246311 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.264497 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.280865 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.299247 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.315615 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.333495 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.342060 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.342128 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.342141 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.342182 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.342194 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.357847 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf
5a7eaf9d1c0fc2094bf254ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac93ab1749118bd52f0b7f92db8cfa837936c94a9ba3918a7c2df69465ae5c27\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:14Z\\\",\\\"message\\\":\\\"ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/redhat-operators]} name:Service_openshift-marketplace/redhat-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.138:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {97419c58-41c7-41d7-a137-a446f0c7eeb3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0121 10:56:13.455275 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:13Z is after 2025\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event 
handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.374975 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 
10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.404051 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.422607 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.440649 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:32Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.445150 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.445202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.445214 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.445232 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.445244 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.548180 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.548236 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.548247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.548264 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.548276 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.650920 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.650967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.650980 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.650996 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.651007 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.754344 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.754438 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.754454 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.754475 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.754489 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.856873 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.856932 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.856949 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.856970 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.856984 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.959760 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.959825 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.959839 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.959859 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:32 crc kubenswrapper[4925]: I0121 10:56:32.959879 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:32Z","lastTransitionTime":"2026-01-21T10:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.062290 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.062333 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.062342 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.062358 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.062371 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.106916 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/2.log" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.111865 4925 scope.go:117] "RemoveContainer" containerID="61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba" Jan 21 10:56:33 crc kubenswrapper[4925]: E0121 10:56:33.112219 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.125994 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.142938 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resou
rce-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.160045 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.165161 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.165222 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.165240 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.165262 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.165280 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.178915 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.199533 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.211964 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.231153 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.247021 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 09:57:25.980739364 +0000 UTC Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.250867 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.268123 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.269490 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.269526 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.269537 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.269554 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.269566 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.279431 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.293223 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.314799 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.328864 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 
10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.348957 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.361690 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.372193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.372241 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.372297 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.372319 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.372332 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.375791 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.390370 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.403152 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:33Z is after 2025-08-24T17:21:41Z"
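The two status-patch failures above share one root cause: the pod.network-node-identity.openshift.io admission webhook at 127.0.0.1:9743 is serving a certificate whose notAfter (2025-08-24T17:21:41Z) is months behind the node clock (2026-01-21), so the API server's webhook call fails at the TLS layer and every kubelet status PATCH is rejected with an Internal error. A minimal Go sketch for inspecting that endpoint's validity window; the address is taken from the log, and InsecureSkipVerify is deliberate so the handshake succeeds even though the certificate is expired:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"log"
    	"time"
    )

    func main() {
    	// Webhook endpoint from the log above; skip verification so we can
    	// read the expired certificate instead of failing the handshake.
    	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	cert := conn.ConnectionState().PeerCertificates[0]
    	fmt.Println("subject:  ", cert.Subject)
    	fmt.Println("notBefore:", cert.NotBefore)
    	fmt.Println("notAfter: ", cert.NotAfter)
    	fmt.Println("expired:  ", time.Now().After(cert.NotAfter))
    }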
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.475356 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.475414 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.475426 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.475442 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.475451 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.501215 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.501265 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.501326 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 10:56:33 crc kubenswrapper[4925]: E0121 10:56:33.501379 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 10:56:33 crc kubenswrapper[4925]: E0121 10:56:33.501495 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.501731 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:56:33 crc kubenswrapper[4925]: E0121 10:56:33.501863 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9"
Jan 21 10:56:33 crc kubenswrapper[4925]: E0121 10:56:33.502276 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.578505 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.578560 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.578573 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.578591 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.578640 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
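Each "Node became not ready" flap above is the container runtime's NetworkReady check failing for lack of any CNI network definition; the sandbox-creation and pod-sync errors are downstream of the same condition. Roughly, the readiness test reduces to "does /etc/kubernetes/cni/net.d/ contain at least one network config file?". A sketch of that test under stated assumptions (hasCNIConfig is a hypothetical helper, and the accepted extensions are an approximation, not the runtime's exact list):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // hasCNIConfig reports whether confDir holds at least one candidate CNI
    // network configuration, roughly the condition failing in the log above.
    func hasCNIConfig(confDir string) (bool, error) {
    	entries, err := os.ReadDir(confDir)
    	if err != nil {
    		return false, err
    	}
    	for _, e := range entries {
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			return true, nil
    		}
    	}
    	return false, nil
    }

    func main() {
    	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
    	fmt.Println(ok, err) // prints "false <nil>" while the network provider is down
    }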
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.681472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.681542 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.681553 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.681569 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.681581 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.784202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.784245 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.784257 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.784272 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.784282 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.886780 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.886855 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.886879 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.886910 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.886932 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.989706 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.989772 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.989783 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.989800 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:33 crc kubenswrapper[4925]: I0121 10:56:33.989812 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:33Z","lastTransitionTime":"2026-01-21T10:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.092217 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.092669 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.092758 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.092833 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.092898 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.195562 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.195613 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.195632 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.195688 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.195713 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.247813 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 02:43:49.454140345 +0000 UTC
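The certificate_manager.go line is the kubelet-serving certificate manager re-evaluating its rotation deadline: the certificate is valid until 2026-02-24, but the jittered deadline is re-drawn on each pass, which is why it moves from one second to the next (later entries show 2025-11-24, 2025-12-28, 2026-01-13). In upstream client-go the deadline is drawn uniformly from roughly 70-90% of the certificate's validity window; a minimal sketch of that computation, assuming a one-year validity since the log only shows notAfter:

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // rotationDeadline mirrors the client-go certificate manager's policy of
    // rotating at a random point between 70% and 90% of the validity window,
    // which is why the logged deadline differs on every retry.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := notAfter.Sub(notBefore)
    	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
    	return notBefore.Add(jittered)
    }

    func main() {
    	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
    	notBefore := notAfter.AddDate(-1, 0, 0) // assumption: notBefore is not in the log
    	for i := 0; i < 3; i++ {
    		fmt.Println(rotationDeadline(notBefore, notAfter))
    	}
    }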
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.298643 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.298693 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.298707 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.298726 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.298740 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.402123 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.402193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.402209 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.402228 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.402241 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.505612 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.505663 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.505678 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.505695 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.505705 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.607671 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.607716 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.607726 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.607743 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.607753 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.709904 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.709965 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.709980 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.710003 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.710016 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.813038 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.813104 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.813115 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.813136 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.813147 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.917273 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.917336 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.917356 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.917379 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:34 crc kubenswrapper[4925]: I0121 10:56:34.917415 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:34Z","lastTransitionTime":"2026-01-21T10:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.022483 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.022997 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.023134 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.023270 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.023385 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.126197 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.126237 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.126250 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.126267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.126281 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.228977 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.229020 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.229029 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.229045 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.229058 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.248444 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 04:10:05.759242115 +0000 UTC Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.333166 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.333258 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.333288 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.333320 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.333349 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.436746 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.436833 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.436860 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.436893 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.436917 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.501827 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.501840 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.501883 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.501949 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:35 crc kubenswrapper[4925]: E0121 10:56:35.501962 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:35 crc kubenswrapper[4925]: E0121 10:56:35.502128 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:35 crc kubenswrapper[4925]: E0121 10:56:35.502212 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:35 crc kubenswrapper[4925]: E0121 10:56:35.502418 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.515525 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.540663 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.540777 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.540797 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.540869 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.540891 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.645578 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.645640 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.645652 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.645673 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.645685 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.705247 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq"
Jan 21 10:56:35 crc kubenswrapper[4925]: E0121 10:56:35.705608 4925 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 10:56:35 crc kubenswrapper[4925]: E0121 10:56:35.705815 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs podName:5c3596d1-1f08-4703-ab63-c29358aac0d9 nodeName:}" failed. No retries permitted until 2026-01-21 10:57:07.705766824 +0000 UTC m=+119.309658758 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs") pod "network-metrics-daemon-2txwq" (UID: "5c3596d1-1f08-4703-ab63-c29358aac0d9") : object "openshift-multus"/"metrics-daemon-secret" not registered
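The metrics-certs mount for network-metrics-daemon-2txwq fails because the kubelet has not yet registered the openshift-multus/metrics-daemon-secret object in its local cache, and the operation executor backs off before retrying: 32s here, with the next attempt gated until 10:57:07. That delay is consistent with a doubling backoff; a minimal sketch, where the 500ms floor and ~2m cap are assumed upstream kubelet defaults rather than values shown in the log:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const initial = 500 * time.Millisecond
    	const maxDelay = 2*time.Minute + 2*time.Second // assumed cap

    	// Doubling backoff between failed MountVolume attempts; the 7th
    	// failure waits 32s, matching "durationBeforeRetry 32s" above.
    	d := initial
    	for attempt := 1; attempt <= 10; attempt++ {
    		fmt.Printf("attempt %2d: wait %v before retrying\n", attempt, d)
    		d *= 2
    		if d > maxDelay {
    			d = maxDelay
    		}
    	}
    }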
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.749416 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.749473 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.749483 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.749508 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.749523 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.853519 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.853600 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.853628 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.853649 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.853665 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.957510 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.957585 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.957599 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.957619 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:35 crc kubenswrapper[4925]: I0121 10:56:35.957632 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:35Z","lastTransitionTime":"2026-01-21T10:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.061005 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.061072 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.061082 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.061100 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.061111 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.165076 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.165139 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.165149 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.165170 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.165181 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.249502 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 07:06:49.139483478 +0000 UTC Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.270342 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.270421 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.270436 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.270460 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.270480 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.373893 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.373952 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.373962 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.373983 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.373999 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.477068 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.477138 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.477156 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.477179 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.477191 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.580737 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.580804 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.580815 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.580837 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.580849 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.684141 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.684187 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.684203 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.684226 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.684239 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.813377 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.813493 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.813506 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.813530 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.813545 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.916434 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.916523 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.916538 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.916569 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:36 crc kubenswrapper[4925]: I0121 10:56:36.916600 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:36Z","lastTransitionTime":"2026-01-21T10:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.020706 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.020780 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.020795 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.020822 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.020837 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.124156 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.124213 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.124232 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.124254 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.124272 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.226913 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.226967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.226979 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.226998 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.227012 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.250229 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 15:14:00.690691423 +0000 UTC Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.330848 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.330912 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.330929 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.330975 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.330991 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.433916 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.433970 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.433982 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.433999 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.434011 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.501379 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.501509 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.501474 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:37 crc kubenswrapper[4925]: E0121 10:56:37.501632 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.501754 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:37 crc kubenswrapper[4925]: E0121 10:56:37.502045 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:37 crc kubenswrapper[4925]: E0121 10:56:37.502150 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:37 crc kubenswrapper[4925]: E0121 10:56:37.502240 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.538087 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.538178 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.538200 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.538232 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.538255 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.641121 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.641168 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.641179 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.641197 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.641208 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.744275 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.744332 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.744344 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.744363 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.744375 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.847549 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.847637 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.847656 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.847682 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.847700 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.950976 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.951060 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.951080 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.951107 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:37 crc kubenswrapper[4925]: I0121 10:56:37.951129 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:37Z","lastTransitionTime":"2026-01-21T10:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.053947 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.053998 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.054013 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.054033 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.054045 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.157286 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.157336 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.157345 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.157363 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.157378 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.262205 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 04:54:59.106208189 +0000 UTC Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.265662 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.265708 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.265723 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.265742 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.265754 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.368868 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.368965 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.368981 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.369005 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.369023 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.472540 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.472598 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.472607 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.472623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.472635 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.576200 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.576252 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.576262 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.576279 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.576290 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.679253 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.679291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.679301 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.679315 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.679326 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.782014 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.782078 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.782091 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.782112 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.782126 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.885930 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.886019 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.886042 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.886069 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.886087 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.989176 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.989230 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.989244 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.989268 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:38 crc kubenswrapper[4925]: I0121 10:56:38.989282 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:38Z","lastTransitionTime":"2026-01-21T10:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.092440 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.092527 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.092540 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.092570 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.092625 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.144275 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.144367 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.144381 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.144432 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.144452 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.160905 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.166000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.166040 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.166051 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.166073 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.166089 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.179586 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.185180 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.185212 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.185226 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.185245 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.185257 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.198827 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.203708 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.203746 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.203760 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.203776 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.203786 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.219364 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.223731 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.223765 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.223775 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.223788 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.223798 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.238947 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.239094 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.241348 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.241383 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.241414 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.241442 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.241458 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.263020 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 12:35:23.005000088 +0000 UTC Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.345788 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.345865 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.345882 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.345907 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.345925 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.449924 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.450006 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.450022 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.450052 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.450068 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.501756 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.501827 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.501960 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.501972 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.502103 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.502146 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.502204 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:39 crc kubenswrapper[4925]: E0121 10:56:39.502268 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.518348 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.543501 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.553989 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.554057 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.554083 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.554107 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.554121 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.568218 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.590935 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.609011 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.629882 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.655684 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f551
23895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.657673 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.657775 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.657791 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.657820 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.657836 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.676786 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.695776 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f56fef17-59d9-4825-b850-ccc54be32da2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc5405bf612569a42ed39d11df66003beb1842e098e64e83e32d5937fa244748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.721935 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.738906 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.756039 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.761218 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.761267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.761277 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.761296 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.761310 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.777178 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.794736 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.810814 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.829814 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.846105 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.864566 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.864732 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.864775 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.864787 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.864809 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.864826 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.884308 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:39Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.968809 4925 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.968903 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.968920 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.968940 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:39 crc kubenswrapper[4925]: I0121 10:56:39.968985 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:39Z","lastTransitionTime":"2026-01-21T10:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.072895 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.072964 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.072982 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.073010 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.073023 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.176568 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.176625 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.176636 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.176660 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.176683 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.263891 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 15:27:04.127385958 +0000 UTC Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.278868 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.278932 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.278948 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.278964 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.278974 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.381590 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.381651 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.381670 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.381701 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.381735 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.485022 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.485511 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.485522 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.485537 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.485551 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.588313 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.588374 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.588387 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.588424 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.588436 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.692223 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.692305 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.692329 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.692365 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.692389 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.795770 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.795831 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.795844 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.795863 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.795878 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.899545 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.899608 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.899623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.899648 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:40 crc kubenswrapper[4925]: I0121 10:56:40.899670 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:40Z","lastTransitionTime":"2026-01-21T10:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.002385 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.002445 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.002456 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.002472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.002482 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.105497 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.105846 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.105946 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.106056 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.106169 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.208876 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.208914 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.208923 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.208938 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.208949 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.264993 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 08:08:37.581873592 +0000 UTC Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.311576 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.311677 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.311697 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.311722 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.311742 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.414287 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.414345 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.414361 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.414383 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.414421 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.501752 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.501851 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.501966 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:41 crc kubenswrapper[4925]: E0121 10:56:41.501915 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.501975 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:41 crc kubenswrapper[4925]: E0121 10:56:41.502106 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:41 crc kubenswrapper[4925]: E0121 10:56:41.502372 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:41 crc kubenswrapper[4925]: E0121 10:56:41.502884 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.517035 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.517109 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.517129 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.517152 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.517166 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.621409 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.621467 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.621479 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.621500 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.621513 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.725894 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.725982 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.726000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.726029 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.726043 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.829760 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.829812 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.829821 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.829842 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.829852 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.932908 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.932959 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.932976 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.932999 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:41 crc kubenswrapper[4925]: I0121 10:56:41.933015 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:41Z","lastTransitionTime":"2026-01-21T10:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.036192 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.036298 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.036329 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.036366 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.036436 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.139588 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.139637 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.139648 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.139665 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.139676 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.243527 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.243617 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.243629 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.243646 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.243659 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.266173 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 15:25:30.257994398 +0000 UTC Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.345775 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.345823 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.345834 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.345851 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.345863 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.449203 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.449267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.449292 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.449319 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.449335 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.552180 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.552218 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.552227 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.552245 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.552255 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.654747 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.654789 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.654801 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.654817 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.654829 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.757665 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.757716 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.757729 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.757781 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.757796 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.860654 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.860713 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.860725 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.860742 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.861279 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.973600 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.973644 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.973654 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.973668 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:42 crc kubenswrapper[4925]: I0121 10:56:42.973679 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:42Z","lastTransitionTime":"2026-01-21T10:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.076104 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.076476 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.076569 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.076669 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.076775 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.180062 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.180543 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.180857 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.181086 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.181313 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.266749 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 16:38:19.005952548 +0000 UTC Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.284138 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.284451 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.284707 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.284936 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.285161 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.387849 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.387934 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.387950 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.387981 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.388001 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.490225 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.490269 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.490281 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.490304 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.490323 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.501178 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.501194 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.501290 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:43 crc kubenswrapper[4925]: E0121 10:56:43.501461 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.501731 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:43 crc kubenswrapper[4925]: E0121 10:56:43.501824 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:43 crc kubenswrapper[4925]: E0121 10:56:43.502001 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:43 crc kubenswrapper[4925]: E0121 10:56:43.502170 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.593257 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.593306 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.593316 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.593334 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.593345 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.696768 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.696834 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.696848 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.696867 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.696879 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.799825 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.799918 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.799945 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.799973 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.799992 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.903038 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.903088 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.903110 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.903129 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:43 crc kubenswrapper[4925]: I0121 10:56:43.903146 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:43Z","lastTransitionTime":"2026-01-21T10:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.006337 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.006428 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.006455 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.006481 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.006498 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.109346 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.109385 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.109407 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.109425 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.109434 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.212713 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.212776 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.212794 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.212815 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.212828 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.268614 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 15:41:13.196955052 +0000 UTC Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.315435 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.315506 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.315525 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.315549 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.315567 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.418426 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.418469 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.418483 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.418503 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.418514 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.521958 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.522062 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.522077 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.522096 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.522109 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.625448 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.625506 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.625516 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.625534 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.625549 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.729158 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.729237 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.729252 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.729273 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.729287 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.831708 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.831769 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.831786 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.831807 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.831822 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.935256 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.935353 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.935372 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.935426 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:44 crc kubenswrapper[4925]: I0121 10:56:44.935442 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:44Z","lastTransitionTime":"2026-01-21T10:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.039563 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.039701 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.039715 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.039735 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.039748 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.143474 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.143553 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.143564 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.143584 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.143602 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.249144 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.249191 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.249202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.249226 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.249241 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.269468 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 13:41:11.446579023 +0000 UTC Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.352500 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.352650 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.352666 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.352685 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.352719 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.455696 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.455764 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.455799 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.455820 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.455839 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.502032 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.502270 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.502301 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.502897 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.502982 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.505087 4925 scope.go:117] "RemoveContainer" containerID="61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba" Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.505497 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.505852 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.507602 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.507890 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.559434 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.559486 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.559504 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.559525 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.559540 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.663247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.663317 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.663334 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.663360 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.663379 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.767624 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.767701 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.767715 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.767736 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.767749 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.871069 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.871105 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.871115 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.871133 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.871144 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.927297 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.927495 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927517 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-21 10:57:49.927484973 +0000 UTC m=+161.531376927 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.927565 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.927611 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.927643 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927664 4925 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927732 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:57:49.927712321 +0000 UTC m=+161.531604275 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927769 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927797 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927820 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927857 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:57:49.927846485 +0000 UTC m=+161.531738429 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927902 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927965 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927987 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.928084 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:57:49.928054982 +0000 UTC m=+161.531946916 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.927898 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:56:45 crc kubenswrapper[4925]: E0121 10:56:45.928159 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:57:49.928149996 +0000 UTC m=+161.532041930 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.975009 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.975063 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.975078 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.975096 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:45 crc kubenswrapper[4925]: I0121 10:56:45.975109 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:45Z","lastTransitionTime":"2026-01-21T10:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
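The nestedpendingoperations records above defer each failed volume operation with durationBeforeRetry 1m4s, i.e. 64s, which is 500ms doubled seven times. A sketch of that progression, with the 500ms base and doubling factor inferred from the 64s figure rather than quoted from kubelet source:

```go
package main

import (
	"fmt"
	"time"
)

// nextRetryDelay doubles the delay on each consecutive failure,
// starting from an assumed 500ms base. Doubling from 500ms reaches
// exactly 1m4s on the eighth failure, matching durationBeforeRetry
// in the records above.
func nextRetryDelay(failures int) time.Duration {
	d := 500 * time.Millisecond
	for i := 1; i < failures; i++ {
		d *= 2
	}
	return d
}

func main() {
	for f := 1; f <= 8; f++ {
		fmt.Printf("failure %d -> retry in %v\n", f, nextRetryDelay(f))
	}
	// failure 8 -> retry in 1m4s, the value shown in the records above.
}
```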
Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.078804 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.078863 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.078875 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.078895 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.078913 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.176106 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/0.log" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.176197 4925 generic.go:334] "Generic (PLEG): container finished" podID="82b678c3-b1e1-4294-9f9f-02103a6823cc" containerID="7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6" exitCode=1 Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.176252 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hwzqb" event={"ID":"82b678c3-b1e1-4294-9f9f-02103a6823cc","Type":"ContainerDied","Data":"7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.176942 4925 scope.go:117] "RemoveContainer" containerID="7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.182283 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.182330 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.182342 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.182359 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.182372 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.192909 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.211950 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.231639 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.246799 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.266359 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.269761 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 07:33:15.633793038 +0000 UTC Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.287898 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.287996 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.288013 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.288046 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.288064 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.294814 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.310966 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.326559 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.343919 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:46Z\\\",\\\"message\\\":\\\"2026-01-21T10:55:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835\\\\n2026-01-21T10:55:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835 to /host/opt/cni/bin/\\\\n2026-01-21T10:56:01Z [verbose] multus-daemon started\\\\n2026-01-21T10:56:01Z [verbose] Readiness Indicator file check\\\\n2026-01-21T10:56:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.367237 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.391823 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.391878 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.391890 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.391928 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.391948 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.393684 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.407200 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f56fef17-59d9-4825-b850-ccc54be32da2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc5405bf612569a42ed39d11df66003beb1842e098e64e83e32d5937fa244748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.428485 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.445599 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.462100 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.477738 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.495226 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.495776 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.495843 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.495890 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.495912 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.495927 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.511244 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.529295 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:46Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.598879 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.598928 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.598965 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.598981 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.598992 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.702694 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.703049 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.703917 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.704341 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.704528 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.807349 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.808073 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.808194 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.808283 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.808376 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.911968 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.912490 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.912506 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.912529 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:46 crc kubenswrapper[4925]: I0121 10:56:46.912541 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:46Z","lastTransitionTime":"2026-01-21T10:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.015209 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.015267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.015280 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.015299 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.015311 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.117191 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.117475 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.117542 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.117606 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.117679 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.181482 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/0.log" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.181696 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hwzqb" event={"ID":"82b678c3-b1e1-4294-9f9f-02103a6823cc","Type":"ContainerStarted","Data":"61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22"} Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.207984 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\
\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.220431 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.220483 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.220501 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.220519 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.220530 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.226085 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.244100 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.264808 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.270506 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 16:42:51.249421482 +0000 UTC Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.278634 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.294979 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.310038 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.324627 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.324699 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.324712 4925 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.324736 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.324752 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.326803 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.342630 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.355765 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.368262 4925 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.382567 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f56fef17-59d9-4825-b850-ccc54be32da2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc5405bf612569a42ed39d11df66003beb1842e098e64e83e32d5937fa244748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.407853 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/
etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026
-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.424457 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.428056 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.428136 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.428155 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.428181 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.428196 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.442539 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.457318 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:46Z\\\",\\\"message\\\":\\\"2026-01-21T10:55:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835\\\\n2026-01-21T10:55:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835 to /host/opt/cni/bin/\\\\n2026-01-21T10:56:01Z [verbose] multus-daemon started\\\\n2026-01-21T10:56:01Z [verbose] Readiness Indicator file check\\\\n2026-01-21T10:56:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.481139 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.501321 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.501321 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.501351 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.501373 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:56:47 crc kubenswrapper[4925]: E0121 10:56:47.501613 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9"
Jan 21 10:56:47 crc kubenswrapper[4925]: E0121 10:56:47.501648 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 10:56:47 crc kubenswrapper[4925]: E0121 10:56:47.501732 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 10:56:47 crc kubenswrapper[4925]: E0121 10:56:47.501913 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.502204 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.526961 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:47Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.530708 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.530741 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.530752 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.530780 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.530794 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.633195 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.633238 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.633247 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.633261 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.633269 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.736863 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.736942 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.736954 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.736977 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.736991 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.840100 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.840158 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.840171 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.840193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.840203 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.943213 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.943294 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.943313 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.943337 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:47 crc kubenswrapper[4925]: I0121 10:56:47.943356 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:47Z","lastTransitionTime":"2026-01-21T10:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.046646 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.046706 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.046721 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.046746 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.046764 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.150890 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.151014 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.151037 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.151070 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.151228 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.254167 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.254249 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.254267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.254296 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.254313 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.271641 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 11:09:23.87160377 +0000 UTC
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.358226 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.358301 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.358314 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.358548 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.358563 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.461281 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.461347 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.461360 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.461379 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.461433 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.564280 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.564334 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.564347 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.564367 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.564381 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.667671 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.667753 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.667774 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.667808 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.667832 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.770954 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.771004 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.771015 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.771034 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.771046 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.873697 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.873747 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.873759 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.873775 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.873787 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.976497 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.976588 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.976603 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.976623 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:48 crc kubenswrapper[4925]: I0121 10:56:48.976636 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:48Z","lastTransitionTime":"2026-01-21T10:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.080233 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.080295 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.080307 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.080327 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.080345 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.182425 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.182474 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.182488 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.182521 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.182543 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.271958 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 05:19:23.946354138 +0000 UTC
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.285019 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.285103 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.285121 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.285152 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.285174 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.388024 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.388095 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.388110 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.388139 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.388158 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.521524 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.521711 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.521762 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.521899 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.523024 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.523355 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9"
Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.523483 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.523622 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.524090 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.524147 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.524158 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.524176 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.524193 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.542374 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.557501 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.572789 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.590698 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.606234 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z"
Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.618679 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.627296 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.627590 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.627640 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.627665 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.627681 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.629562 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.629593 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.629602 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.629620 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.629634 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.634905 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc 
kubenswrapper[4925]: E0121 10:56:49.643482 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.649717 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.649765 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:49 
crc kubenswrapper[4925]: I0121 10:56:49.649777 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.649794 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.649805 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.650783 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:46Z\\\",\\\"message\\\":\\\"2026-01-21T10:55:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835\\\\n2026-01-21T10:55:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835 to /host/opt/cni/bin/\\\\n2026-01-21T10:56:01Z [verbose] multus-daemon started\\\\n2026-01-21T10:56:01Z [verbose] Readiness Indicator file check\\\\n2026-01-21T10:56:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.667574 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.671976 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf
5a7eaf9d1c0fc2094bf254ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.673121 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.673232 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.673253 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.673275 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.673289 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.686858 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.688687 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.693470 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.693511 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.693522 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.693543 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.693558 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.701125 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f56fef17-59d9-4825-b850-ccc54be32da2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc5405bf612569a42ed39d11df66003beb1842e098e64e83e32d5937fa244748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21
T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.707993 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.712864 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.712926 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.712946 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.712972 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.712989 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.727155 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Complet
ed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.730824 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: E0121 10:56:49.730984 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.733316 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.733366 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.733378 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.733427 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.733453 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.746863 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.761038 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.778270 4925 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.795609 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.809234 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.826656 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.835779 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.835825 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.835835 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.835853 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.835864 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.843036 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:49Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.940413 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.940498 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.940510 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.940534 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:49 crc kubenswrapper[4925]: I0121 10:56:49.940548 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:49Z","lastTransitionTime":"2026-01-21T10:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.044102 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.044248 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.044261 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.044287 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.044302 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.147963 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.148015 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.148026 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.148043 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.148054 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.251616 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.251662 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.251674 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.251693 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.251704 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.273003 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 07:53:33.302547085 +0000 UTC Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.355261 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.355316 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.355332 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.355352 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.355363 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.457472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.457547 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.457567 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.457599 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.457652 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.560763 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.560814 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.560826 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.560844 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.560855 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.664269 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.664327 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.664341 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.664361 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.664371 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.767917 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.767968 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.767984 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.768006 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.768018 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.871071 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.871119 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.871130 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.871148 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.871159 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.973943 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.973989 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.974002 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.974024 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:50 crc kubenswrapper[4925]: I0121 10:56:50.974037 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:50Z","lastTransitionTime":"2026-01-21T10:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.077069 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.077150 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.077160 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.077176 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.077185 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.180771 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.180852 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.180867 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.180886 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.180898 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.274068 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 19:10:20.689114492 +0000 UTC Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.282904 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.282951 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.282980 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.282999 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.283012 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.386077 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.386136 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.386154 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.386174 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.386188 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.488594 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.488658 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.488671 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.488692 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.488705 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.501617 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.501614 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.501696 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.501721 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:51 crc kubenswrapper[4925]: E0121 10:56:51.501896 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:51 crc kubenswrapper[4925]: E0121 10:56:51.501988 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:51 crc kubenswrapper[4925]: E0121 10:56:51.502102 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:51 crc kubenswrapper[4925]: E0121 10:56:51.502210 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.591945 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.592002 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.592016 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.592033 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.592046 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.695864 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.696009 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.696025 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.696045 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.696058 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.798936 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.799010 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.799022 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.799045 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.799059 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.901572 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.901630 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.901645 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.901664 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:51 crc kubenswrapper[4925]: I0121 10:56:51.901676 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:51Z","lastTransitionTime":"2026-01-21T10:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.004316 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.004362 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.004375 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.004413 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.004429 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.107382 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.107510 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.107537 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.107567 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.107591 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.211437 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.211482 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.211492 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.211507 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.211518 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.275306 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 09:09:01.176191786 +0000 UTC Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.314838 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.314872 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.314880 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.314911 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.314921 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.418692 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.418767 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.418782 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.418800 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.419165 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.522160 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.522219 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.522229 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.522244 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.522257 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.625412 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.625466 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.625484 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.625501 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.625511 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.728219 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.728263 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.728273 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.728291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.728332 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.830858 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.830932 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.830940 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.830956 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.830972 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.933201 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.933244 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.933253 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.933267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:52 crc kubenswrapper[4925]: I0121 10:56:52.933278 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:52Z","lastTransitionTime":"2026-01-21T10:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.036572 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.036632 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.036644 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.036666 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.036677 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.141705 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.141786 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.141798 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.142520 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.142627 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.246484 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.246585 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.246609 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.246642 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.246661 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.276237 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 06:13:50.316041215 +0000 UTC Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.349976 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.350047 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.350058 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.350073 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.350084 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.452922 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.452986 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.453001 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.453020 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.453032 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.501753 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:53 crc kubenswrapper[4925]: E0121 10:56:53.501921 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.501774 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:53 crc kubenswrapper[4925]: E0121 10:56:53.502010 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.501752 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.501780 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:53 crc kubenswrapper[4925]: E0121 10:56:53.502070 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:53 crc kubenswrapper[4925]: E0121 10:56:53.502142 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.556028 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.556099 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.556112 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.556132 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.556146 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.659160 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.659223 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.659234 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.659252 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.659267 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.796312 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.796370 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.796384 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.796434 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.796451 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.899856 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.899940 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.899957 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.899979 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:53 crc kubenswrapper[4925]: I0121 10:56:53.899992 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:53Z","lastTransitionTime":"2026-01-21T10:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.003200 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.003255 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.003264 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.003284 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.003295 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.106071 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.106129 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.106142 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.106161 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.106176 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.207659 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.207703 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.207713 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.207730 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.207741 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.277121 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 23:50:05.621055408 +0000 UTC Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.310903 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.310947 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.310992 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.311009 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.311018 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.413460 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.413515 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.413527 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.413545 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.413558 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.517935 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.517996 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.518007 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.518023 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.518036 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.620720 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.620775 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.620785 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.620805 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.620817 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.722905 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.722949 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.722958 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.722972 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.722981 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.826476 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.826544 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.826564 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.826588 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.826604 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.974550 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.974605 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.974617 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.974646 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:54 crc kubenswrapper[4925]: I0121 10:56:54.974662 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:54Z","lastTransitionTime":"2026-01-21T10:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.078300 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.078359 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.078375 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.078417 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.078432 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.182643 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.182696 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.182706 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.182722 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.182734 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.277586 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 23:27:26.840213885 +0000 UTC Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.285889 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.285939 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.285953 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.285971 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.285982 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.388859 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.388939 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.388952 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.388970 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.388981 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.491875 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.491923 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.491934 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.491951 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.491963 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.501365 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.501431 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.501431 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:55 crc kubenswrapper[4925]: E0121 10:56:55.501544 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:55 crc kubenswrapper[4925]: E0121 10:56:55.501607 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.501686 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:55 crc kubenswrapper[4925]: E0121 10:56:55.501739 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:55 crc kubenswrapper[4925]: E0121 10:56:55.501979 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.595016 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.595091 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.595104 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.595127 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.595143 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.697491 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.697561 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.697573 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.697596 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.697610 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.800620 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.800702 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.800760 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.800799 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.800825 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.905025 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.905077 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.905087 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.905115 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:55 crc kubenswrapper[4925]: I0121 10:56:55.905135 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:55Z","lastTransitionTime":"2026-01-21T10:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.009275 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.009347 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.009366 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.009423 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.009442 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.120004 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.121559 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.121633 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.121668 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.121694 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.225575 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.225661 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.225690 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.225723 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.225751 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.278809 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 03:00:37.478276556 +0000 UTC Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.328841 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.328878 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.328889 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.328907 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.328919 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.431165 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.431210 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.431222 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.431239 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.431251 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
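The certificate_manager line above deserves a second look: the kubelet serving certificate is valid until 2026-02-24, yet the computed rotation deadline (2025-12-12 here, and different dates on later attempts) is already in the past at the log's wall-clock time of 2026-01-21, so the manager keeps re-evaluating rotation on every pass. The deadline moves between attempts because client-go jitters it to a random point late in the certificate's validity window. The sketch below re-creates that idea; the 70-90% band is an assumed fraction for illustration, not the library's literal constants, and the issue date is assumed since the log only shows the expiry.

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // rotationDeadline sketches how a jittered deadline can land well before
    // the certificate actually expires: pick a random point in the last part
    // of the validity window. Fractions are assumptions for illustration.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := notAfter.Sub(notBefore)
    	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
    	return notBefore.Add(jittered)
    }

    func main() {
    	// Expiry taken from the log line above; the issue date is assumed.
    	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
    	notBefore := notAfter.Add(-90 * 24 * time.Hour)
    	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
    }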
Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.534814 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.534866 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.534880 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.534899 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.534912 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.637646 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.637699 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.637713 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.637730 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.637744 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.740898 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.740994 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.741014 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.741040 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.741061 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.861936 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.862021 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.862037 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.862054 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.862065 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.966828 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.966911 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.966934 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.966967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:56 crc kubenswrapper[4925]: I0121 10:56:56.966990 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:56Z","lastTransitionTime":"2026-01-21T10:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.069665 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.069749 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.069764 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.069789 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.069805 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.173631 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.173711 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.173725 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.173749 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.173762 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.276963 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.277056 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.277077 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.277104 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.277122 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.279624 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 08:36:46.619578418 +0000 UTC Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.382010 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.382078 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.382092 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.382127 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.382141 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.484904 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.484980 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.485003 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.485024 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.485038 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.501682 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.501772 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.501819 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.501968 4925 util.go:30] "No sandbox for pod can be found. 
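The repeating NodeNotReady / "Node became not ready" pairs are the kubelet stamping the same Ready=False condition into the Node object on every status sync, roughly every 100ms while the CNI check keeps failing. To watch that condition from outside the node instead of grepping this log, a small client-go program suffices; the kubeconfig path below is a placeholder, and the node name "crc" is taken from the log.

    package main

    import (
    	"context"
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Placeholder path; use whatever kubeconfig reaches the cluster.
    	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    	if err != nil {
    		panic(err)
    	}
    	client, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	node, err := client.CoreV1().Nodes().Get(context.TODO(), "crc", metav1.GetOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, c := range node.Status.Conditions {
    		if c.Type == corev1.NodeReady {
    			// Mirrors the condition block the kubelet logs above.
    			fmt.Printf("Ready=%s reason=%s message=%q\n", c.Status, c.Reason, c.Message)
    		}
    	}
    }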
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:57 crc kubenswrapper[4925]: E0121 10:56:57.501992 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:57 crc kubenswrapper[4925]: E0121 10:56:57.502050 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:57 crc kubenswrapper[4925]: E0121 10:56:57.502122 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:57 crc kubenswrapper[4925]: E0121 10:56:57.502165 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.503196 4925 scope.go:117] "RemoveContainer" containerID="61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.587428 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.587515 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.587528 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.587552 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.587569 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.690595 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.690672 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.690688 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.690706 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.690718 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.793684 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.793766 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.793784 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.793803 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.793813 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.897135 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.897178 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.897189 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.897206 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.897219 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.999333 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.999428 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.999452 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.999472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:57 crc kubenswrapper[4925]: I0121 10:56:57.999487 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:57Z","lastTransitionTime":"2026-01-21T10:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.101812 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.101874 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.101888 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.101908 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.101922 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.204487 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.204572 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.204597 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.204617 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.204627 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.280455 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 17:05:01.724275818 +0000 UTC Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.308129 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.308190 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.308204 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.308221 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.308233 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.411948 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.412019 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.412036 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.412059 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.412077 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.515231 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.515276 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.515287 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.515304 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.515316 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.618267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.618317 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.618331 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.618349 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.618362 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.720867 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.720915 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.720928 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.720944 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.720956 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.824063 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.824110 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.824124 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.824143 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.824156 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.926905 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.926954 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.926966 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.926986 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:58 crc kubenswrapper[4925]: I0121 10:56:58.926997 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:58Z","lastTransitionTime":"2026-01-21T10:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.030299 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.030342 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.030352 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.030368 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.030385 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.134020 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.134089 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.134101 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.134122 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.134136 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.236861 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.236906 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.236916 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.236932 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.236943 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.281502 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 11:54:54.758135331 +0000 UTC Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.340332 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.340426 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.340444 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.340469 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.340493 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.443478 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.443541 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.443553 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.443573 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.443587 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.501508 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.501622 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.501816 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.502096 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.502318 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.502458 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.502530 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.502671 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.524307 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.541874 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.550306 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.550357 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.550370 4925 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.550410 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.550428 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.557523 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.568969 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.579424 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.590778 4925 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.601673 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f56fef17-59d9-4825-b850-ccc54be32da2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc5405bf612569a42ed39d11df66003beb1842e098e64e83e32d5937fa244748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 
2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.647288 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/
etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026
-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.660093 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.660155 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.660172 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.660193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.660207 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.665352 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.680075 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.699791 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:46Z\\\",\\\"message\\\":\\\"2026-01-21T10:55:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835\\\\n2026-01-21T10:55:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835 to /host/opt/cni/bin/\\\\n2026-01-21T10:56:01Z [verbose] multus-daemon started\\\\n2026-01-21T10:56:01Z [verbose] Readiness Indicator file check\\\\n2026-01-21T10:56:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.730583 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.760658 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.766661 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.766734 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.766753 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.766778 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.766793 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.777605 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run
/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.801829 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\
\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.822381 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.846162 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.869649 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.870053 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.870081 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.870093 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.870106 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.870119 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.874489 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.874512 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.874520 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.874530 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.874539 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.885837 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.892793 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient 
memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\
\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\
":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.898300 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.898338 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.898349 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.898365 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.898376 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.915310 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.920765 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.920801 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.920810 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.920824 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.920833 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.935188 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.940783 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.940833 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.940847 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.940866 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.940880 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.955163 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.961091 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.961186 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.961198 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.961220 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.961234 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.978353 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bb3c8461-270f-4cd5-aa85-780d3a9e3ead\\\",\\\"systemUUID\\\":\\\"57887b03-108e-4b07-83a9-2cba1ffe7256\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:56:59Z is after 2025-08-24T17:21:41Z" Jan 21 10:56:59 crc kubenswrapper[4925]: E0121 10:56:59.978627 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.981652 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.981711 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.981725 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.981750 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:56:59 crc kubenswrapper[4925]: I0121 10:56:59.981764 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:56:59Z","lastTransitionTime":"2026-01-21T10:56:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.084902 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.084967 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.084982 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.085009 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.085033 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:00Z","lastTransitionTime":"2026-01-21T10:57:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.188502 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.188556 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.188570 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.188587 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.188600 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:00Z","lastTransitionTime":"2026-01-21T10:57:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.236374 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/2.log" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.240072 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.245603 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.274914 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.302208 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.325703 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.465203 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.501744 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.527676 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 
10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.549217 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f56fef17-59d9-4825-b850-ccc54be32da2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc5405bf612569a42ed39d11df66003beb1842e098e64e83e32d5937fa244748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.587421 4925 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4
e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.608960 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.629915 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.650417 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:46Z\\\",\\\"message\\\":\\\"2026-01-21T10:55:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835\\\\n2026-01-21T10:55:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835 to /host/opt/cni/bin/\\\\n2026-01-21T10:56:01Z [verbose] multus-daemon started\\\\n2026-01-21T10:56:01Z [verbose] Readiness Indicator file check\\\\n2026-01-21T10:56:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.676787 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.837432 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.837611 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.838104 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 09:45:37.070263695 +0000 UTC Jan 21 10:57:00 crc kubenswrapper[4925]: E0121 10:57:00.838120 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.838347 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:00 crc kubenswrapper[4925]: E0121 10:57:00.838751 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:00 crc kubenswrapper[4925]: E0121 10:57:00.839149 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.842645 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.842697 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.842728 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.842751 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.842767 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:00Z","lastTransitionTime":"2026-01-21T10:57:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.872985 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.892538 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.911465 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"imag
e\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.927616 4925 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.949310 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z"
Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.952350 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.952383 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.952421 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.952437 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.952449 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:00Z","lastTransitionTime":"2026-01-21T10:57:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.973939 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:00 crc kubenswrapper[4925]: I0121 10:57:00.986687 4925 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:00Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.056369 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.056466 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.056485 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.056508 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.056524 4925 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:01Z","lastTransitionTime":"2026-01-21T10:57:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.160514 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.160598 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.160616 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.160641 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.160659 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:01Z","lastTransitionTime":"2026-01-21T10:57:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.264015 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.264082 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.264094 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.264115 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.264132 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:01Z","lastTransitionTime":"2026-01-21T10:57:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.368060 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.368118 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.368129 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.368149 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.368180 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:01Z","lastTransitionTime":"2026-01-21T10:57:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.472128 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.472175 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.472188 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.472208 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.472222 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:01Z","lastTransitionTime":"2026-01-21T10:57:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.699670 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:01 crc kubenswrapper[4925]: E0121 10:57:01.699916 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.706472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.709689 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.709710 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.709751 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.709784 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:01Z","lastTransitionTime":"2026-01-21T10:57:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.813887 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.813956 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.813970 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.813991 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.814023 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:01Z","lastTransitionTime":"2026-01-21T10:57:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.838581 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 14:12:13.507449227 +0000 UTC Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.918219 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.918289 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.918302 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.918323 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:01 crc kubenswrapper[4925]: I0121 10:57:01.918339 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:01Z","lastTransitionTime":"2026-01-21T10:57:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.021353 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.021437 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.021451 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.021473 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.021485 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.125781 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.126161 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.126173 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.126193 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.126207 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.229906 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.229995 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.230010 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.230033 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.230048 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.334063 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.334149 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.334170 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.334199 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.334219 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.438215 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.438303 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.438316 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.438334 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.438345 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.501761 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.501897 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.501965 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:02 crc kubenswrapper[4925]: E0121 10:57:02.502128 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:02 crc kubenswrapper[4925]: E0121 10:57:02.502257 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:02 crc kubenswrapper[4925]: E0121 10:57:02.502450 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.544721 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.544835 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.544852 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.544893 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.544917 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.656747 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.656810 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.656823 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.656844 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.656859 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.760361 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.760430 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.760442 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.760457 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.760468 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.839725 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 14:02:53.716486645 +0000 UTC Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.863804 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.863852 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.863874 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.863892 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.863903 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.970458 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.970535 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.970550 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.970575 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:02 crc kubenswrapper[4925]: I0121 10:57:02.970591 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:02Z","lastTransitionTime":"2026-01-21T10:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.074182 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.074280 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.074291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.074311 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.074342 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.177612 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.177678 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.177688 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.177709 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.177720 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.258456 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/3.log" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.260477 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/2.log" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.266272 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" exitCode=1 Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.266472 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.266671 4925 scope.go:117] "RemoveContainer" containerID="61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.268161 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 10:57:03 crc kubenswrapper[4925]: E0121 10:57:03.268673 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.286850 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.286915 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.286929 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.286946 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.286958 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.292369 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0121 10:55:33.993884 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0121 10:55:33.994793 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1139657181/tls.crt::/tmp/serving-cert-1139657181/tls.key\\\\\\\"\\\\nI0121 10:55:41.270679 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0121 10:55:41.275713 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 10:55:41.275761 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 10:55:41.275808 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 10:55:41.275816 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 10:55:41.285338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0121 10:55:41.285371 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0121 10:55:41.285387 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285419 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 10:55:41.285426 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 10:55:41.285432 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 10:55:41.285436 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 10:55:41.285439 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0121 10:55:41.313285 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.313958 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c638e29d-7b20-434a-8b57-541580d7a1a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87b23c0c0b106333e899808503c75e4cddc03796a9a73999bbae2cd6ddf9ad69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1fced9b160f6e886dc18719ab951158b90ed811ac9be3678e4c313d9083ff79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4eb541ac79168a881d566633b932b9c0676afa3fb4ff323cdd61dbc9e8ab8f32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a2b7e16df6a3c2aeed8a4030d5133b2e627c46ea2ba028e2ad04b43f1bb0940\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.337161 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.365536 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b0b25f1-8430-459d-9805-e667615dc073\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2360e8974afeb6f6f6e791d54ad5692cca48db896559f6f4f7f8fad690702bc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5337236f90755ca670a891522aa8de0c1bf70897b926fe2180fa9d127bcebfff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd5f2ee5f714f6406c325983b7d3be38cc01949d133288257a27b59ef9d927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9ca95069aa8445ebe8c814fe65026ccea9ae77d72d2e852538af0f8cbf55d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e9839c24b2b6d047327f9ac6c837f34e52733ba43303c396274336e45463158\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://77b0d156bf77c1998dc8adf5559026f7ecb0b325c222cb54ed501066e52957d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://588500514d82e382e98cb3ec1e1b4b9e289177335b14f0755118a4a04615f024\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:56:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-928hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-pbw2x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.387650 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jqsxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e986f25-2ad4-428d-b6a5-f99e1a480285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a9eef2058474e56ba34d38d3ddca3744a8ce5651b7c5e9db26d760ddb6ff69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-flgnv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jqsxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 
10:57:03.390742 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.390833 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.390890 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.390927 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.390946 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.411880 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef172b7c393e1cee17930ac5af7fe241a9fa2a883b1b514b02ef92b85753d3fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.430658 4925 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6126cb8e2c5b03b5eed9abc6d67c1c879dd1fba01c28aedafaba954944d539e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1123944321352ffd90e5cac85acf1a5280c834745d3fa57397189801f73ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.452693 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.472494 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rzmbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae658aae-64a2-4df8-938c-7a4c2a35655f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ac87445a2f5c9d4f5b2987a1a0d91da2ae2abb669a43cc003dbe89b8a2d943f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hb96t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rzmbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.488564 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2txwq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3596d1-1f08-4703-ab63-c29358aac0d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7qwdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2txwq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.495364 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.495439 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.495458 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.495483 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.495497 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.500725 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:03 crc kubenswrapper[4925]: E0121 10:57:03.500913 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.508143 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f56fef17-59d9-4825-b850-ccc54be32da2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc5405bf612569a42ed39d11df66003beb1842e098e64e83e32d5937fa244748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5833154ccd5838f9fd018d318eae78e2bf8194f96fa5b0c62a62e7188edda89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\
":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.541853 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"271db2a9-18ea-4fdf-970b-565423baa763\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e1115f0dc95ad300dcb29254276584392fc89b86e12434f9bbd23f086f3b72a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://911e515783490b5e9ea5a987ac13a439e3d85ca5c4cde18d94ca050cf37bbf41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3ee01ad5562e42eb0f5d21e53523a9115b699c0c7b65c8cf46f66093d64b621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019be
e1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83bc0c93829fb5e201b4e322e30710b40d1ba1e517e3653adf5d308376e9bd59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa4a31416acfdbc015aebbfd20566fb16e86890ab1103dae34b286b652ef22b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73d7e586b4c0359a763521acce5bb00d95d1c65be6616e22ba488630591e7fb7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"
imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7f6a7c40756c1226c68151288eca0008e564e9401755b5719562daf0970cb87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3ce4e3a65da0ec449094efdad19f75fbbec9ae8df36d0ef2743710870630afe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.560063 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.578276 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ecb260361247a51a18adcd5c2f1b7d0794ac5e4ff9fc62e1184a906a88e6f61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.597165 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hwzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"82b678c3-b1e1-4294-9f9f-02103a6823cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:46Z\\\",\\\"message\\\":\\\"2026-01-21T10:55:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835\\\\n2026-01-21T10:55:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3fef411f-9c57-4d73-aacd-ee2b6d95e835 to /host/opt/cni/bin/\\\\n2026-01-21T10:56:01Z [verbose] multus-daemon started\\\\n2026-01-21T10:56:01Z [verbose] Readiness Indicator file check\\\\n2026-01-21T10:56:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dccvf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hwzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.598977 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.599037 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.599048 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.599069 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.599086 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.625865 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a976857-73df-49d9-9b7e-b5cb3d250a5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61b11578472f525c331f6c36aaf7dde28e5f8ccf5a7eaf9d1c0fc2094bf254ba\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:56:31Z\\\",\\\"message\\\":\\\"mers/externalversions/factory.go:141\\\\nI0121 10:56:31.461586 6696 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:56:31.461756 6696 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 10:56:31.461782 6696 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 10:56:31.461859 6696 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:56:31.461876 6696 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 10:56:31.461902 6696 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 10:56:31.461975 6696 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 10:56:31.462018 6696 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:56:31.462023 6696 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:56:31.462036 6696 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:56:31.462055 6696 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 10:56:31.462104 6696 factory.go:656] Stopping watch factory\\\\nI0121 10:56:31.462130 6696 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 10:56:31.462141 6696 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T10:57:02Z\\\",\\\"message\\\":\\\"ub.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:57:02.068362 6998 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:57:02.068479 6998 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0121 10:57:02.068559 6998 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0121 10:57:02.069135 6998 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 10:57:02.069164 6998 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 10:57:02.069207 6998 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 10:57:02.069221 6998 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 10:57:02.069255 6998 factory.go:656] Stopping watch factory\\\\nI0121 10:57:02.069265 6998 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 10:57:02.069276 6998 ovnkube.go:599] Stopped ovnkube\\\\nI0121 10:57:02.069282 6998 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 
10\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T10:56:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T10:55:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cjd7c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9hk9g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.642574 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8599a6b-48cb-400d-ac34-86be75b9ce54\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7abeabd7d35259f43ca71cab8186c754404da0729d01537d4944c288ee29dd9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e14911cba04d8ce7438ae52b0ba4a94ee273b74e762337b24f5a75555f20d938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9tw4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:56:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glmql\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.660329 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3452af59-1a7b-4147-b7e9-456502086479\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b141ccaf34075c21ff36aa3eb0498a135944c566b4152282f171adc48049980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e501f2e1e63a725e3d6ca4f990e802123f5d4b05e1bc7a1b6cc815d41fd494a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36139de8df5e2492c9fba1a73c189ccd01a6aaeb57ea7c972b0a9af9d3ca3465\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.677470 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f21c81eb-6979-46c3-9594-e4916d36fb0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T10:55:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a62f26ad040bdb92c2971e464b97f36b2d8196bb4b9a3058da6085bd2e57b153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9517109bed2446818
51c5e081a5e888fe430467bd86aa2a6e102f22bb41b603\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T10:55:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46dvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T10:55:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rzs4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T10:57:03Z is after 2025-08-24T17:21:41Z" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.702527 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.702581 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.702629 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.702653 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.702668 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.811097 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.811184 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.811200 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.811227 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.811247 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.840918 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 09:04:27.623819858 +0000 UTC Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.915170 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.915235 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.915246 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.915272 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:03 crc kubenswrapper[4925]: I0121 10:57:03.915286 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:03Z","lastTransitionTime":"2026-01-21T10:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.027204 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.027270 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.027303 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.027325 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.027339 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.130267 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.130311 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.130322 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.130339 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.130353 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.233108 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.233167 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.233183 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.233202 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.233213 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.273682 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/3.log" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.336205 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.336257 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.336270 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.336291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.336305 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.439666 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.439777 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.439800 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.439825 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.439841 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.502819 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:04 crc kubenswrapper[4925]: E0121 10:57:04.503091 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.503472 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:04 crc kubenswrapper[4925]: E0121 10:57:04.503542 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.503690 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:04 crc kubenswrapper[4925]: E0121 10:57:04.503750 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.542837 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.542880 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.542891 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.542906 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.542917 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.646944 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.646989 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.647000 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.647018 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.647030 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.749964 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.750025 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.750037 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.750054 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.750065 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.841544 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 14:51:02.336628983 +0000 UTC Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.853322 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.853371 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.853387 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.853431 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.853447 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.957336 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.957416 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.957431 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.957453 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:04 crc kubenswrapper[4925]: I0121 10:57:04.957464 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:04Z","lastTransitionTime":"2026-01-21T10:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.065028 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.065105 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.065152 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.065184 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.065195 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.169291 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.169454 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.169472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.169522 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.169540 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.273016 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.273089 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.273104 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.273130 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.273145 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.376909 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.376982 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.376996 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.377021 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.377038 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.481289 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.481352 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.481378 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.481420 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.481435 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.502006 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:05 crc kubenswrapper[4925]: E0121 10:57:05.502447 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.585382 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.585490 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.585507 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.585530 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.585547 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.689535 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.689603 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.689624 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.689646 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.689664 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.793827 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.793917 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.793933 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.793954 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.793967 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.841717 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 15:07:01.454687978 +0000 UTC Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.897680 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.897740 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.897761 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.897785 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:05 crc kubenswrapper[4925]: I0121 10:57:05.897797 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:05Z","lastTransitionTime":"2026-01-21T10:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.001350 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.001452 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.001466 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.001489 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.001503 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.105048 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.105115 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.105130 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.105154 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.105172 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.208813 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.208886 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.208896 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.208911 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.208924 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.311375 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.311455 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.311468 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.311487 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.311500 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.441093 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.441214 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.441286 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.441324 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.441376 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.500732 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.500771 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.500738 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:06 crc kubenswrapper[4925]: E0121 10:57:06.500994 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:06 crc kubenswrapper[4925]: E0121 10:57:06.501078 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:06 crc kubenswrapper[4925]: E0121 10:57:06.501204 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.545087 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.545145 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.545159 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.545181 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.545195 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.649260 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.649323 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.649336 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.649354 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.649367 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.752799 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.752893 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.752912 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.752938 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.752956 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.842639 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 19:30:11.728931381 +0000 UTC Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.856564 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.856614 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.856626 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.856644 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.856659 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
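
Note how the certificate_manager.go:356 line recurs with the same expiration (2026-02-24 05:53:03 +0000 UTC) but a different rotation deadline each time (2025-11-30, 2025-11-12, 2025-12-22, ...). The kubelet's certificate manager recomputes the deadline with random jitter inside the certificate's validity window, which is why the value moves between attempts. A minimal sketch of that scheme; the 70-90%-of-lifetime window is an assumption based on client-go's certificate manager, and the issuance time below is invented for illustration:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline picks a random point 70-90% of the way through the
    // certificate's lifetime, so repeated computations yield different
    // deadlines, as seen in the log above.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        jitter := time.Duration((0.7 + 0.2*rand.Float64()) * float64(total))
        return notBefore.Add(jitter)
    }

    func main() {
        notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
        notBefore := notAfter.Add(-90 * 24 * time.Hour) // assumed issuance time
        fmt.Println(rotationDeadline(notBefore, notAfter))
    }
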
Has your network provider started?"} Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.960309 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.960434 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.960472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.960504 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:06 crc kubenswrapper[4925]: I0121 10:57:06.960525 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:06Z","lastTransitionTime":"2026-01-21T10:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.063255 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.063367 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.063377 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.063436 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.063450 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.166534 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.166661 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.166679 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.166707 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.166724 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.284645 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.285069 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.285085 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.285110 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.285129 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.389108 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.389176 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.389189 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.389209 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.389221 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.492499 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.492899 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.492987 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.493084 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.493154 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.501126 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:07 crc kubenswrapper[4925]: E0121 10:57:07.501367 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.597268 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.597321 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.597341 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.597360 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.597371 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.701355 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.701462 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.701483 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.701510 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.701760 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.791342 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq"
Jan 21 10:57:07 crc kubenswrapper[4925]: E0121 10:57:07.791774 4925 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 10:57:07 crc kubenswrapper[4925]: E0121 10:57:07.792134 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs podName:5c3596d1-1f08-4703-ab63-c29358aac0d9 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:11.792090755 +0000 UTC m=+183.395982689 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs") pod "network-metrics-daemon-2txwq" (UID: "5c3596d1-1f08-4703-ab63-c29358aac0d9") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.805876 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.805921 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.805935 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.805959 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.805974 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
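
The nestedpendingoperations.go:348 entry above shows the volume manager's exponential backoff: after repeated MountVolume.SetUp failures for metrics-certs (the secret is "not registered" because the pod's secret sources can't be resolved yet), the next retry is pushed out by durationBeforeRetry 1m4s, i.e. 64s. That is consistent with an initial delay of 500ms doubled across prior attempts; the initial delay and cap below are assumed defaults used only to show the arithmetic:

    package main

    import (
        "fmt"
        "time"
    )

    // backoffAfter returns the retry delay after n failed attempts,
    // doubling from an initial delay and clamping at a maximum.
    // 500ms initial and a 2m2s cap are assumed values for illustration.
    func backoffAfter(n int, initial, max time.Duration) time.Duration {
        d := initial
        for i := 1; i < n; i++ {
            d *= 2
            if d >= max {
                return max
            }
        }
        return d
    }

    func main() {
        // 8 failures: 0.5s -> 1s -> 2s -> 4s -> 8s -> 16s -> 32s -> 64s
        fmt.Println(backoffAfter(8, 500*time.Millisecond, 2*time.Minute+2*time.Second))
    }
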
Has your network provider started?"} Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.843501 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 23:57:05.662449465 +0000 UTC Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.908867 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.908941 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.908960 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.908986 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:07 crc kubenswrapper[4925]: I0121 10:57:07.909003 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:07Z","lastTransitionTime":"2026-01-21T10:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.013020 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.013085 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.013104 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.013131 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.013150 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.116847 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.116887 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.116899 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.116916 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.116926 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.220279 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.220341 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.220352 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.220378 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.220412 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.324579 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.324644 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.324657 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.324676 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.324689 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.428354 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.428464 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.428480 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.428506 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.428524 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.501292 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.501546 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.501670 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:08 crc kubenswrapper[4925]: E0121 10:57:08.501695 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:08 crc kubenswrapper[4925]: E0121 10:57:08.501848 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:08 crc kubenswrapper[4925]: E0121 10:57:08.502019 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.534290 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.534366 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.534470 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.534516 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.534549 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.639443 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.639525 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.639543 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.639565 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.639589 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.744103 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.744176 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.744221 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.744249 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.744271 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.908007 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 23:43:52.396519952 +0000 UTC Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.912938 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.912989 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.913001 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.913021 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:08 crc kubenswrapper[4925]: I0121 10:57:08.913036 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:08Z","lastTransitionTime":"2026-01-21T10:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.019325 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.019512 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.019543 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.019563 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.019577 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:09Z","lastTransitionTime":"2026-01-21T10:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.123058 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.123108 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.123116 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.123133 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.123143 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:09Z","lastTransitionTime":"2026-01-21T10:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.226298 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.226377 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.226391 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.226441 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.226456 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:09Z","lastTransitionTime":"2026-01-21T10:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.329753 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.330191 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.330311 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.330472 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.330614 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:09Z","lastTransitionTime":"2026-01-21T10:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 10:57:09 crc kubenswrapper[4925]: E0121 10:57:09.464024 4925 kubelet_node_status.go:497] "Node not becoming ready in time after startup"
Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.501714 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 10:57:09 crc kubenswrapper[4925]: E0121 10:57:09.502037 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 10:57:09 crc kubenswrapper[4925]: E0121 10:57:09.838536 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
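
The kubelet_node_status.go:497 error marks the point where the kubelet has waited longer than its startup grace window for the node to become Ready; from here on the log alternates that warning with the kubelet.go:2916 runtime-network check until the CNI config shows up. A compact sketch of that kind of deadline check; the grace value and start offset below are assumptions for illustration, not the kubelet's actual constants:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        start := time.Now().Add(-2 * time.Minute) // kubelet start; m=+121 in the log
        grace := 90 * time.Second                 // assumed readiness grace window
        ready := false                            // NetworkReady=false throughout this excerpt
        if !ready && time.Since(start) > grace {
            fmt.Println(`"Node not becoming ready in time after startup"`)
        }
    }
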
Jan 21 10:57:09 crc kubenswrapper[4925]: I0121 10:57:09.908438 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 07:09:57.035327452 +0000 UTC
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.035921 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-hwzqb" podStartSLOduration=83.035840082 podStartE2EDuration="1m23.035840082s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.03397278 +0000 UTC m=+121.637864744" watchObservedRunningTime="2026-01-21 10:57:10.035840082 +0000 UTC m=+121.639732006"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.092659 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glmql" podStartSLOduration=82.092632391 podStartE2EDuration="1m22.092632391s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.092590429 +0000 UTC m=+121.696482373" watchObservedRunningTime="2026-01-21 10:57:10.092632391 +0000 UTC m=+121.696524325"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.119022 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=35.118987231 podStartE2EDuration="35.118987231s" podCreationTimestamp="2026-01-21 10:56:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.117994159 +0000 UTC m=+121.721886093" watchObservedRunningTime="2026-01-21 10:57:10.118987231 +0000 UTC m=+121.722879165"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.201425 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=87.201383806 podStartE2EDuration="1m27.201383806s" podCreationTimestamp="2026-01-21 10:55:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.200519987 +0000 UTC m=+121.804411921" watchObservedRunningTime="2026-01-21 10:57:10.201383806 +0000 UTC m=+121.805275740"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.247137 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=89.247111864 podStartE2EDuration="1m29.247111864s" podCreationTimestamp="2026-01-21 10:55:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.245526142 +0000 UTC m=+121.849418086" watchObservedRunningTime="2026-01-21 10:57:10.247111864 +0000 UTC m=+121.851003798"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.296430 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podStartSLOduration=83.296369121 podStartE2EDuration="1m23.296369121s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.261806546 +0000 UTC m=+121.865698500" watchObservedRunningTime="2026-01-21 10:57:10.296369121 +0000 UTC m=+121.900261055"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.297182 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-pbw2x" podStartSLOduration=83.297173259 podStartE2EDuration="1m23.297173259s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.295651707 +0000 UTC m=+121.899543661" watchObservedRunningTime="2026-01-21 10:57:10.297173259 +0000 UTC m=+121.901065193"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.315818 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-jqsxs" podStartSLOduration=83.315796061 podStartE2EDuration="1m23.315796061s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.314859869 +0000 UTC m=+121.918751823" watchObservedRunningTime="2026-01-21 10:57:10.315796061 +0000 UTC m=+121.919687995"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.321591 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.321680 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.321713 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.321746 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.321762 4925 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T10:57:10Z","lastTransitionTime":"2026-01-21T10:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.343277 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.343244418 podStartE2EDuration="1m29.343244418s" podCreationTimestamp="2026-01-21 10:55:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.34269163 +0000 UTC m=+121.946583574" watchObservedRunningTime="2026-01-21 10:57:10.343244418 +0000 UTC m=+121.947136352" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.367217 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=59.367139247 podStartE2EDuration="59.367139247s" podCreationTimestamp="2026-01-21 10:56:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.366447514 +0000 UTC m=+121.970339448" watchObservedRunningTime="2026-01-21 10:57:10.367139247 +0000 UTC m=+121.971031201" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.417201 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk"] Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.418778 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.423972 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.424107 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.424980 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.425212 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.426744 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-rzmbp" podStartSLOduration=84.426718888 podStartE2EDuration="1m24.426718888s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:10.426124439 +0000 UTC m=+122.030016393" watchObservedRunningTime="2026-01-21 10:57:10.426718888 +0000 UTC m=+122.030610822" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.477095 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1b1fd621-6a72-4e19-b778-3a8e3950ee68-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.478131 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/1b1fd621-6a72-4e19-b778-3a8e3950ee68-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.478281 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1b1fd621-6a72-4e19-b778-3a8e3950ee68-service-ca\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.478367 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1b1fd621-6a72-4e19-b778-3a8e3950ee68-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.478424 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b1fd621-6a72-4e19-b778-3a8e3950ee68-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.500872 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.501048 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.501094 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:10 crc kubenswrapper[4925]: E0121 10:57:10.501105 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:10 crc kubenswrapper[4925]: E0121 10:57:10.501255 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:10 crc kubenswrapper[4925]: E0121 10:57:10.501462 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.579753 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b1fd621-6a72-4e19-b778-3a8e3950ee68-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.579825 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1b1fd621-6a72-4e19-b778-3a8e3950ee68-service-ca\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.579873 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1b1fd621-6a72-4e19-b778-3a8e3950ee68-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.579901 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b1fd621-6a72-4e19-b778-3a8e3950ee68-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.579930 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1b1fd621-6a72-4e19-b778-3a8e3950ee68-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.580019 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1b1fd621-6a72-4e19-b778-3a8e3950ee68-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.580053 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1b1fd621-6a72-4e19-b778-3a8e3950ee68-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.581918 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1b1fd621-6a72-4e19-b778-3a8e3950ee68-service-ca\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 
21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.588948 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b1fd621-6a72-4e19-b778-3a8e3950ee68-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.605524 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b1fd621-6a72-4e19-b778-3a8e3950ee68-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-t6bnk\" (UID: \"1b1fd621-6a72-4e19-b778-3a8e3950ee68\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.739369 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" Jan 21 10:57:10 crc kubenswrapper[4925]: W0121 10:57:10.761013 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b1fd621_6a72_4e19_b778_3a8e3950ee68.slice/crio-e324e672eec750c227dd340410d0943ae5a9208b2c8a3a3eb44a2dc396ee8095 WatchSource:0}: Error finding container e324e672eec750c227dd340410d0943ae5a9208b2c8a3a3eb44a2dc396ee8095: Status 404 returned error can't find the container with id e324e672eec750c227dd340410d0943ae5a9208b2c8a3a3eb44a2dc396ee8095 Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.908759 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 07:59:52.89558813 +0000 UTC Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.908872 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 21 10:57:10 crc kubenswrapper[4925]: I0121 10:57:10.955264 4925 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 21 10:57:11 crc kubenswrapper[4925]: I0121 10:57:11.317783 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" event={"ID":"1b1fd621-6a72-4e19-b778-3a8e3950ee68","Type":"ContainerStarted","Data":"499f855df26ae8b9e23b2be49ac92e6d0d24869ee2052ceb85b6fb78680967d7"} Jan 21 10:57:11 crc kubenswrapper[4925]: I0121 10:57:11.317937 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" event={"ID":"1b1fd621-6a72-4e19-b778-3a8e3950ee68","Type":"ContainerStarted","Data":"e324e672eec750c227dd340410d0943ae5a9208b2c8a3a3eb44a2dc396ee8095"} Jan 21 10:57:11 crc kubenswrapper[4925]: I0121 10:57:11.338231 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t6bnk" podStartSLOduration=85.338204149 podStartE2EDuration="1m25.338204149s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:11.337324679 +0000 UTC m=+122.941216613" watchObservedRunningTime="2026-01-21 10:57:11.338204149 +0000 UTC m=+122.942096083" Jan 21 10:57:11 crc kubenswrapper[4925]: I0121 10:57:11.511148 4925 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:11 crc kubenswrapper[4925]: E0121 10:57:11.511353 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:12 crc kubenswrapper[4925]: I0121 10:57:12.501321 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:12 crc kubenswrapper[4925]: I0121 10:57:12.501553 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:12 crc kubenswrapper[4925]: I0121 10:57:12.501635 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:12 crc kubenswrapper[4925]: E0121 10:57:12.502115 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:12 crc kubenswrapper[4925]: E0121 10:57:12.502253 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:12 crc kubenswrapper[4925]: E0121 10:57:12.502474 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:13 crc kubenswrapper[4925]: I0121 10:57:13.501769 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:13 crc kubenswrapper[4925]: E0121 10:57:13.502330 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:14 crc kubenswrapper[4925]: I0121 10:57:14.501498 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:14 crc kubenswrapper[4925]: I0121 10:57:14.501617 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:14 crc kubenswrapper[4925]: E0121 10:57:14.501659 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:14 crc kubenswrapper[4925]: I0121 10:57:14.501892 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:14 crc kubenswrapper[4925]: E0121 10:57:14.502327 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:14 crc kubenswrapper[4925]: I0121 10:57:14.502333 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 10:57:14 crc kubenswrapper[4925]: E0121 10:57:14.502180 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:14 crc kubenswrapper[4925]: E0121 10:57:14.502747 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" Jan 21 10:57:14 crc kubenswrapper[4925]: E0121 10:57:14.840360 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:15 crc kubenswrapper[4925]: I0121 10:57:15.501090 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:15 crc kubenswrapper[4925]: E0121 10:57:15.501344 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:16 crc kubenswrapper[4925]: I0121 10:57:16.501640 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:16 crc kubenswrapper[4925]: I0121 10:57:16.501778 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:16 crc kubenswrapper[4925]: E0121 10:57:16.501837 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:16 crc kubenswrapper[4925]: I0121 10:57:16.501862 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:16 crc kubenswrapper[4925]: E0121 10:57:16.502019 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:16 crc kubenswrapper[4925]: E0121 10:57:16.502088 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:17 crc kubenswrapper[4925]: I0121 10:57:17.501244 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:17 crc kubenswrapper[4925]: E0121 10:57:17.501856 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:18 crc kubenswrapper[4925]: I0121 10:57:18.501260 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:18 crc kubenswrapper[4925]: E0121 10:57:18.501818 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:18 crc kubenswrapper[4925]: I0121 10:57:18.501260 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:18 crc kubenswrapper[4925]: E0121 10:57:18.501899 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:18 crc kubenswrapper[4925]: I0121 10:57:18.501260 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:18 crc kubenswrapper[4925]: E0121 10:57:18.501964 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:19 crc kubenswrapper[4925]: I0121 10:57:19.501096 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:19 crc kubenswrapper[4925]: E0121 10:57:19.502944 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:19 crc kubenswrapper[4925]: E0121 10:57:19.841180 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:20 crc kubenswrapper[4925]: I0121 10:57:20.501574 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:20 crc kubenswrapper[4925]: I0121 10:57:20.501592 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:20 crc kubenswrapper[4925]: E0121 10:57:20.501807 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:20 crc kubenswrapper[4925]: I0121 10:57:20.501613 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:20 crc kubenswrapper[4925]: E0121 10:57:20.501946 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:20 crc kubenswrapper[4925]: E0121 10:57:20.501972 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:21 crc kubenswrapper[4925]: I0121 10:57:21.501373 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:21 crc kubenswrapper[4925]: E0121 10:57:21.501581 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:22 crc kubenswrapper[4925]: I0121 10:57:22.501057 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:22 crc kubenswrapper[4925]: I0121 10:57:22.501063 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:22 crc kubenswrapper[4925]: I0121 10:57:22.501082 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:22 crc kubenswrapper[4925]: E0121 10:57:22.501241 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:22 crc kubenswrapper[4925]: E0121 10:57:22.501626 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:22 crc kubenswrapper[4925]: E0121 10:57:22.501729 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:23 crc kubenswrapper[4925]: I0121 10:57:23.501376 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:23 crc kubenswrapper[4925]: E0121 10:57:23.501677 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:24 crc kubenswrapper[4925]: I0121 10:57:24.501095 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:24 crc kubenswrapper[4925]: I0121 10:57:24.501167 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:24 crc kubenswrapper[4925]: E0121 10:57:24.501282 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:24 crc kubenswrapper[4925]: I0121 10:57:24.501212 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:24 crc kubenswrapper[4925]: E0121 10:57:24.501438 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:24 crc kubenswrapper[4925]: E0121 10:57:24.501641 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:24 crc kubenswrapper[4925]: E0121 10:57:24.843552 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:25 crc kubenswrapper[4925]: I0121 10:57:25.501483 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:25 crc kubenswrapper[4925]: E0121 10:57:25.501712 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:26 crc kubenswrapper[4925]: I0121 10:57:26.500909 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:26 crc kubenswrapper[4925]: E0121 10:57:26.501099 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:26 crc kubenswrapper[4925]: I0121 10:57:26.501316 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:26 crc kubenswrapper[4925]: E0121 10:57:26.501376 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:26 crc kubenswrapper[4925]: I0121 10:57:26.501515 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:26 crc kubenswrapper[4925]: E0121 10:57:26.501565 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:27 crc kubenswrapper[4925]: I0121 10:57:27.501079 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:27 crc kubenswrapper[4925]: E0121 10:57:27.501248 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:28 crc kubenswrapper[4925]: I0121 10:57:28.500914 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:28 crc kubenswrapper[4925]: I0121 10:57:28.500921 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:28 crc kubenswrapper[4925]: I0121 10:57:28.500991 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:28 crc kubenswrapper[4925]: E0121 10:57:28.501189 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:28 crc kubenswrapper[4925]: E0121 10:57:28.501522 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:28 crc kubenswrapper[4925]: E0121 10:57:28.502256 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:28 crc kubenswrapper[4925]: I0121 10:57:28.502571 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 10:57:28 crc kubenswrapper[4925]: E0121 10:57:28.502806 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" Jan 21 10:57:29 crc kubenswrapper[4925]: I0121 10:57:29.500642 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:29 crc kubenswrapper[4925]: E0121 10:57:29.502173 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:29 crc kubenswrapper[4925]: E0121 10:57:29.844430 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:30 crc kubenswrapper[4925]: I0121 10:57:30.500951 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:30 crc kubenswrapper[4925]: I0121 10:57:30.501070 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:30 crc kubenswrapper[4925]: E0121 10:57:30.501162 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:30 crc kubenswrapper[4925]: I0121 10:57:30.500970 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:30 crc kubenswrapper[4925]: E0121 10:57:30.501297 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:30 crc kubenswrapper[4925]: E0121 10:57:30.501387 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:31 crc kubenswrapper[4925]: I0121 10:57:31.501253 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:31 crc kubenswrapper[4925]: E0121 10:57:31.501488 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.501207 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.501549 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:32 crc kubenswrapper[4925]: E0121 10:57:32.501587 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.501721 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:32 crc kubenswrapper[4925]: E0121 10:57:32.501804 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:32 crc kubenswrapper[4925]: E0121 10:57:32.501824 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.520753 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/1.log" Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.521965 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/0.log" Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.522029 4925 generic.go:334] "Generic (PLEG): container finished" podID="82b678c3-b1e1-4294-9f9f-02103a6823cc" containerID="61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22" exitCode=1 Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.522093 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hwzqb" event={"ID":"82b678c3-b1e1-4294-9f9f-02103a6823cc","Type":"ContainerDied","Data":"61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22"} Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.522144 4925 scope.go:117] "RemoveContainer" containerID="7e2b836bc953dab3a22a54920339d88cab19930256753a43e46b28811b36c3a6" Jan 21 10:57:32 crc kubenswrapper[4925]: I0121 10:57:32.523038 4925 scope.go:117] "RemoveContainer" containerID="61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22" Jan 21 10:57:32 crc kubenswrapper[4925]: E0121 10:57:32.523259 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-hwzqb_openshift-multus(82b678c3-b1e1-4294-9f9f-02103a6823cc)\"" pod="openshift-multus/multus-hwzqb" podUID="82b678c3-b1e1-4294-9f9f-02103a6823cc" Jan 21 10:57:33 crc kubenswrapper[4925]: I0121 10:57:33.501111 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:33 crc kubenswrapper[4925]: E0121 10:57:33.501721 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:33 crc kubenswrapper[4925]: I0121 10:57:33.528975 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/1.log" Jan 21 10:57:34 crc kubenswrapper[4925]: I0121 10:57:34.501048 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:34 crc kubenswrapper[4925]: I0121 10:57:34.501164 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:34 crc kubenswrapper[4925]: I0121 10:57:34.501216 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:34 crc kubenswrapper[4925]: E0121 10:57:34.501329 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:34 crc kubenswrapper[4925]: E0121 10:57:34.501480 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:34 crc kubenswrapper[4925]: E0121 10:57:34.501559 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:34 crc kubenswrapper[4925]: E0121 10:57:34.846136 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:35 crc kubenswrapper[4925]: I0121 10:57:35.501200 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:35 crc kubenswrapper[4925]: E0121 10:57:35.501423 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:36 crc kubenswrapper[4925]: I0121 10:57:36.501167 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:36 crc kubenswrapper[4925]: I0121 10:57:36.501205 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:36 crc kubenswrapper[4925]: I0121 10:57:36.501218 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:36 crc kubenswrapper[4925]: E0121 10:57:36.501356 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:36 crc kubenswrapper[4925]: E0121 10:57:36.501490 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:36 crc kubenswrapper[4925]: E0121 10:57:36.501563 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:37 crc kubenswrapper[4925]: I0121 10:57:37.501585 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:37 crc kubenswrapper[4925]: E0121 10:57:37.501825 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:38 crc kubenswrapper[4925]: I0121 10:57:38.501594 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:38 crc kubenswrapper[4925]: I0121 10:57:38.501630 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:38 crc kubenswrapper[4925]: E0121 10:57:38.501817 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:38 crc kubenswrapper[4925]: E0121 10:57:38.501921 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:38 crc kubenswrapper[4925]: I0121 10:57:38.501630 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:38 crc kubenswrapper[4925]: E0121 10:57:38.502031 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:39 crc kubenswrapper[4925]: I0121 10:57:39.501186 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:39 crc kubenswrapper[4925]: E0121 10:57:39.502349 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:39 crc kubenswrapper[4925]: E0121 10:57:39.847260 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:40 crc kubenswrapper[4925]: I0121 10:57:40.500712 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:40 crc kubenswrapper[4925]: E0121 10:57:40.500876 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:40 crc kubenswrapper[4925]: I0121 10:57:40.500739 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:40 crc kubenswrapper[4925]: E0121 10:57:40.500947 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:40 crc kubenswrapper[4925]: I0121 10:57:40.500712 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:40 crc kubenswrapper[4925]: E0121 10:57:40.501031 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:41 crc kubenswrapper[4925]: I0121 10:57:41.501283 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:41 crc kubenswrapper[4925]: E0121 10:57:41.502173 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:41 crc kubenswrapper[4925]: I0121 10:57:41.502295 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 10:57:41 crc kubenswrapper[4925]: E0121 10:57:41.502723 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-9hk9g_openshift-ovn-kubernetes(3a976857-73df-49d9-9b7e-b5cb3d250a5f)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" Jan 21 10:57:42 crc kubenswrapper[4925]: I0121 10:57:42.501130 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:42 crc kubenswrapper[4925]: I0121 10:57:42.501195 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:42 crc kubenswrapper[4925]: I0121 10:57:42.501299 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:42 crc kubenswrapper[4925]: E0121 10:57:42.501469 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:42 crc kubenswrapper[4925]: E0121 10:57:42.501613 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:42 crc kubenswrapper[4925]: E0121 10:57:42.501914 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:43 crc kubenswrapper[4925]: I0121 10:57:43.501138 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:43 crc kubenswrapper[4925]: E0121 10:57:43.501485 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:44 crc kubenswrapper[4925]: I0121 10:57:44.501692 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:44 crc kubenswrapper[4925]: I0121 10:57:44.501799 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:44 crc kubenswrapper[4925]: E0121 10:57:44.501918 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:44 crc kubenswrapper[4925]: E0121 10:57:44.502012 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:44 crc kubenswrapper[4925]: I0121 10:57:44.501745 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:44 crc kubenswrapper[4925]: E0121 10:57:44.502133 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:44 crc kubenswrapper[4925]: E0121 10:57:44.849323 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:45 crc kubenswrapper[4925]: I0121 10:57:45.500942 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:45 crc kubenswrapper[4925]: E0121 10:57:45.501321 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:45 crc kubenswrapper[4925]: I0121 10:57:45.501950 4925 scope.go:117] "RemoveContainer" containerID="61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22" Jan 21 10:57:46 crc kubenswrapper[4925]: I0121 10:57:46.501556 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:46 crc kubenswrapper[4925]: I0121 10:57:46.501598 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:46 crc kubenswrapper[4925]: E0121 10:57:46.502196 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:46 crc kubenswrapper[4925]: I0121 10:57:46.501605 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:46 crc kubenswrapper[4925]: E0121 10:57:46.502345 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:46 crc kubenswrapper[4925]: E0121 10:57:46.502649 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:46 crc kubenswrapper[4925]: I0121 10:57:46.585926 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/1.log" Jan 21 10:57:46 crc kubenswrapper[4925]: I0121 10:57:46.586023 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hwzqb" event={"ID":"82b678c3-b1e1-4294-9f9f-02103a6823cc","Type":"ContainerStarted","Data":"429dfd6605e5b4b9683bdedbdf8361a34f0bc590cc74d63cb513a31aaca12791"} Jan 21 10:57:47 crc kubenswrapper[4925]: I0121 10:57:47.501045 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:47 crc kubenswrapper[4925]: E0121 10:57:47.501247 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:48 crc kubenswrapper[4925]: I0121 10:57:48.500984 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:48 crc kubenswrapper[4925]: I0121 10:57:48.501010 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:48 crc kubenswrapper[4925]: I0121 10:57:48.501193 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:48 crc kubenswrapper[4925]: E0121 10:57:48.501306 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:48 crc kubenswrapper[4925]: E0121 10:57:48.501531 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:48 crc kubenswrapper[4925]: E0121 10:57:48.501783 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:49 crc kubenswrapper[4925]: I0121 10:57:49.500755 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.503129 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.849973 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:49 crc kubenswrapper[4925]: I0121 10:57:49.942465 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:57:49 crc kubenswrapper[4925]: I0121 10:57:49.942602 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:49 crc kubenswrapper[4925]: I0121 10:57:49.942648 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:49 crc kubenswrapper[4925]: I0121 10:57:49.942675 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:49 crc kubenswrapper[4925]: I0121 10:57:49.942700 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.942892 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.942929 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.942946 4925 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943069 4925 secret.go:188] Couldn't get secret 
Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943191 4925 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.942894 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:59:51.942837544 +0000 UTC m=+283.546729478 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943346 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 10:59:51.943241337 +0000 UTC m=+283.547133381 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943449 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:59:51.943434734 +0000 UTC m=+283.547326668 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943466 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 10:59:51.943455515 +0000 UTC m=+283.547347449 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943503 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943523 4925 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943535 4925 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:57:49 crc kubenswrapper[4925]: E0121 10:57:49.943565 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 10:59:51.943556238 +0000 UTC m=+283.547448272 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 10:57:50 crc kubenswrapper[4925]: I0121 10:57:50.500752 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:50 crc kubenswrapper[4925]: I0121 10:57:50.500843 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:50 crc kubenswrapper[4925]: E0121 10:57:50.500956 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:50 crc kubenswrapper[4925]: I0121 10:57:50.500971 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:50 crc kubenswrapper[4925]: E0121 10:57:50.501195 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:50 crc kubenswrapper[4925]: E0121 10:57:50.501243 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:51 crc kubenswrapper[4925]: I0121 10:57:51.501298 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:51 crc kubenswrapper[4925]: E0121 10:57:51.501491 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:52 crc kubenswrapper[4925]: I0121 10:57:52.501176 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:52 crc kubenswrapper[4925]: I0121 10:57:52.501234 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:52 crc kubenswrapper[4925]: E0121 10:57:52.501700 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:52 crc kubenswrapper[4925]: E0121 10:57:52.501900 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:52 crc kubenswrapper[4925]: I0121 10:57:52.502466 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:52 crc kubenswrapper[4925]: E0121 10:57:52.502751 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:53 crc kubenswrapper[4925]: I0121 10:57:53.501651 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:53 crc kubenswrapper[4925]: E0121 10:57:53.501934 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:54 crc kubenswrapper[4925]: I0121 10:57:54.501338 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:54 crc kubenswrapper[4925]: I0121 10:57:54.501576 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:54 crc kubenswrapper[4925]: I0121 10:57:54.501492 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:54 crc kubenswrapper[4925]: E0121 10:57:54.501712 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:54 crc kubenswrapper[4925]: E0121 10:57:54.502045 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:54 crc kubenswrapper[4925]: E0121 10:57:54.501916 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:54 crc kubenswrapper[4925]: E0121 10:57:54.851299 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:57:55 crc kubenswrapper[4925]: I0121 10:57:55.501108 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:55 crc kubenswrapper[4925]: E0121 10:57:55.501341 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:55 crc kubenswrapper[4925]: I0121 10:57:55.502461 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 10:57:56 crc kubenswrapper[4925]: I0121 10:57:56.501014 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:56 crc kubenswrapper[4925]: I0121 10:57:56.501072 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:56 crc kubenswrapper[4925]: E0121 10:57:56.501785 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:56 crc kubenswrapper[4925]: I0121 10:57:56.501102 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:56 crc kubenswrapper[4925]: E0121 10:57:56.501921 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:56 crc kubenswrapper[4925]: E0121 10:57:56.502050 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:56 crc kubenswrapper[4925]: I0121 10:57:56.635618 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/3.log" Jan 21 10:57:56 crc kubenswrapper[4925]: I0121 10:57:56.640110 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerStarted","Data":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} Jan 21 10:57:57 crc kubenswrapper[4925]: I0121 10:57:57.501283 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:57 crc kubenswrapper[4925]: E0121 10:57:57.501505 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:57 crc kubenswrapper[4925]: I0121 10:57:57.643602 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:57:57 crc kubenswrapper[4925]: I0121 10:57:57.691173 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podStartSLOduration=130.691134363 podStartE2EDuration="2m10.691134363s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:57:57.686798939 +0000 UTC m=+169.290690883" watchObservedRunningTime="2026-01-21 10:57:57.691134363 +0000 UTC m=+169.295026297" Jan 21 10:57:58 crc kubenswrapper[4925]: I0121 10:57:58.243289 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-2txwq"] Jan 21 10:57:58 crc kubenswrapper[4925]: I0121 10:57:58.243567 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:58 crc kubenswrapper[4925]: E0121 10:57:58.243693 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:58 crc kubenswrapper[4925]: I0121 10:57:58.502048 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:57:58 crc kubenswrapper[4925]: I0121 10:57:58.502130 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:57:58 crc kubenswrapper[4925]: E0121 10:57:58.502758 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:57:58 crc kubenswrapper[4925]: E0121 10:57:58.503119 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:57:59 crc kubenswrapper[4925]: I0121 10:57:59.501591 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:57:59 crc kubenswrapper[4925]: I0121 10:57:59.501734 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:57:59 crc kubenswrapper[4925]: E0121 10:57:59.503921 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:57:59 crc kubenswrapper[4925]: E0121 10:57:59.504130 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:57:59 crc kubenswrapper[4925]: E0121 10:57:59.852100 4925 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 10:58:00 crc kubenswrapper[4925]: I0121 10:58:00.501631 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:58:00 crc kubenswrapper[4925]: E0121 10:58:00.501834 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:58:00 crc kubenswrapper[4925]: I0121 10:58:00.501833 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:58:00 crc kubenswrapper[4925]: E0121 10:58:00.501968 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:58:01 crc kubenswrapper[4925]: I0121 10:58:01.501176 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:58:01 crc kubenswrapper[4925]: I0121 10:58:01.501185 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:58:01 crc kubenswrapper[4925]: E0121 10:58:01.501996 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:58:01 crc kubenswrapper[4925]: E0121 10:58:01.502062 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:58:02 crc kubenswrapper[4925]: I0121 10:58:02.501531 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:58:02 crc kubenswrapper[4925]: E0121 10:58:02.501692 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:58:02 crc kubenswrapper[4925]: I0121 10:58:02.501766 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:58:02 crc kubenswrapper[4925]: E0121 10:58:02.501812 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:58:03 crc kubenswrapper[4925]: I0121 10:58:03.501523 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:58:03 crc kubenswrapper[4925]: I0121 10:58:03.501639 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:58:03 crc kubenswrapper[4925]: E0121 10:58:03.501738 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2txwq" podUID="5c3596d1-1f08-4703-ab63-c29358aac0d9" Jan 21 10:58:03 crc kubenswrapper[4925]: E0121 10:58:03.501854 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 10:58:04 crc kubenswrapper[4925]: I0121 10:58:04.501234 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:58:04 crc kubenswrapper[4925]: I0121 10:58:04.501379 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:58:04 crc kubenswrapper[4925]: E0121 10:58:04.501567 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 10:58:04 crc kubenswrapper[4925]: E0121 10:58:04.501632 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 10:58:05 crc kubenswrapper[4925]: I0121 10:58:05.501441 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:58:05 crc kubenswrapper[4925]: I0121 10:58:05.501568 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:58:05 crc kubenswrapper[4925]: I0121 10:58:05.505501 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 21 10:58:05 crc kubenswrapper[4925]: I0121 10:58:05.506171 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 21 10:58:05 crc kubenswrapper[4925]: I0121 10:58:05.506475 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 21 10:58:05 crc kubenswrapper[4925]: I0121 10:58:05.506796 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 21 10:58:06 crc kubenswrapper[4925]: I0121 10:58:06.500970 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:58:06 crc kubenswrapper[4925]: I0121 10:58:06.501004 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:58:06 crc kubenswrapper[4925]: I0121 10:58:06.504536 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 21 10:58:06 crc kubenswrapper[4925]: I0121 10:58:06.505003 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 21 10:58:11 crc kubenswrapper[4925]: I0121 10:58:11.854056 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:58:11 crc kubenswrapper[4925]: I0121 10:58:11.857373 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 21 10:58:11 crc kubenswrapper[4925]: I0121 10:58:11.873755 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5c3596d1-1f08-4703-ab63-c29358aac0d9-metrics-certs\") pod \"network-metrics-daemon-2txwq\" (UID: \"5c3596d1-1f08-4703-ab63-c29358aac0d9\") " pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.002251 4925 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.343063 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.349019 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2txwq" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.357366 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.357914 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dz6wr"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.358166 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.358508 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.359013 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.359479 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.359501 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.360108 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.363205 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bmpxp"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.368098 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.375691 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-pxkk7"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.376453 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vwhv9"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.376736 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4d577"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.376760 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.377015 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.377018 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.377129 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.377404 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.383536 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-7lrsj"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.384749 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.398110 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.398232 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.398375 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.398562 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.398770 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.398965 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.399178 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.399427 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.399672 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.399903 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.400113 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.400340 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.400675 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.401759 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.402142 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-vw8cb"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.402376 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.402752 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.402942 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.403005 4925 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.403100 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.403212 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.403465 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.403518 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.403691 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.403852 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404066 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404138 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404206 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404313 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404498 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404570 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404620 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404731 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.404970 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.405193 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.405691 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.405754 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.405997 4925 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.406060 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.406219 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.406457 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.406546 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.407205 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.408924 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.409086 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.422050 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.422607 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.441633 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.442383 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.442820 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.454898 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.459132 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.462143 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-4kjnr"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.462654 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.462907 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.463237 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.463490 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.463685 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.463881 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-kk7wd"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.466126 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.466171 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467017 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-serving-cert\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467133 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn679\" (UniqueName: \"kubernetes.io/projected/5a264bb6-3e63-4411-b0a4-95be21527653-kube-api-access-dn679\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467163 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wz4nd\" (UniqueName: \"kubernetes.io/projected/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-kube-api-access-wz4nd\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467227 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467252 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/81742698-2b71-46b2-93fa-1552cfa27f8a-machine-approver-tls\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467314 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467382 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486622 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81742698-2b71-46b2-93fa-1552cfa27f8a-config\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486685 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-serving-cert\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486727 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc99b\" (UniqueName: \"kubernetes.io/projected/81742698-2b71-46b2-93fa-1552cfa27f8a-kube-api-access-cc99b\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486755 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-config\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486769 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-audit\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486787 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486813 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-trusted-ca-bundle\") pod 
\"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486833 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-image-import-ca\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486850 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-668sl\" (UniqueName: \"kubernetes.io/projected/59445cd0-2391-49e1-9a4e-6ca280c8ab85-kube-api-access-668sl\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486899 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-dir\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486942 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e0376dda-f02a-464e-ae41-18d6fddd7097-node-pullsecrets\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486957 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486980 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-oauth-serving-cert\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.486999 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9b96df7-05d7-4cd0-9e30-c5e485f31804-config\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487016 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4np7\" (UniqueName: \"kubernetes.io/projected/8f1540bb-bd69-4f44-ac02-8da0575056e1-kube-api-access-j4np7\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487066 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-encryption-config\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487081 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-service-ca\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487141 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hd5c\" (UniqueName: \"kubernetes.io/projected/e0376dda-f02a-464e-ae41-18d6fddd7097-kube-api-access-6hd5c\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487195 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-etcd-serving-ca\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487233 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-serving-cert\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487252 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487272 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-client-ca\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487308 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-etcd-client\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487349 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78713d9a-139c-4d4e-8068-a0d0d98b86df-config\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487410 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e0376dda-f02a-464e-ae41-18d6fddd7097-audit-dir\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487459 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-etcd-client\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487510 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-config\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487558 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vvzw\" (UniqueName: \"kubernetes.io/projected/b9b96df7-05d7-4cd0-9e30-c5e485f31804-kube-api-access-8vvzw\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487611 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tt9vp\" (UniqueName: \"kubernetes.io/projected/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-kube-api-access-tt9vp\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487650 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487692 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-policies\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487712 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487756 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1540bb-bd69-4f44-ac02-8da0575056e1-serving-cert\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487799 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmd77\" (UniqueName: \"kubernetes.io/projected/b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9-kube-api-access-cmd77\") pod \"cluster-samples-operator-665b6dd947-gqsng\" (UID: \"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487856 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-encryption-config\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487880 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-audit-dir\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487900 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6zm6\" (UniqueName: \"kubernetes.io/projected/b4eed50b-ef22-4637-9aa1-d8528310aed1-kube-api-access-c6zm6\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.487968 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-trusted-ca-bundle\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488021 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/81742698-2b71-46b2-93fa-1552cfa27f8a-auth-proxy-config\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488052 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/78713d9a-139c-4d4e-8068-a0d0d98b86df-serving-cert\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488068 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-config\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488082 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-serving-cert\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488104 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a264bb6-3e63-4411-b0a4-95be21527653-config\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488118 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9b96df7-05d7-4cd0-9e30-c5e485f31804-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488148 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488169 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488183 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gqsng\" (UID: \"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488200 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-config\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488215 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hlcq\" (UniqueName: \"kubernetes.io/projected/890e3b6e-bd8d-438c-992b-508bb751bdca-kube-api-access-9hlcq\") pod \"downloads-7954f5f757-vw8cb\" (UID: \"890e3b6e-bd8d-438c-992b-508bb751bdca\") " pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488234 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5a264bb6-3e63-4411-b0a4-95be21527653-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488248 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-oauth-config\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488271 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-trusted-ca-bundle\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488311 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-audit-policies\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488336 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5a264bb6-3e63-4411-b0a4-95be21527653-images\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488354 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-client-ca\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467037 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.485773 4925 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-etcd-operator/etcd-operator-b45778765-b7tzs"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.467065 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.488377 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/78713d9a-139c-4d4e-8068-a0d0d98b86df-trusted-ca\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.473886 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.489276 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.473923 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.489340 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.489382 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs25v\" (UniqueName: \"kubernetes.io/projected/78713d9a-139c-4d4e-8068-a0d0d98b86df-kube-api-access-rs25v\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.489434 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.489458 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.473997 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474011 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.489338 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474070 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474198 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474295 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474433 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474564 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474614 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474712 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474842 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.474897 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.475022 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.485971 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.490471 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dz6wr"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.490673 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.502328 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.502531 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.503852 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.504061 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.504119 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.504384 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.504535 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.504658 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.504802 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.504963 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.505154 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.512508 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.515488 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-n2k47"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.516426 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.517165 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.517683 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.518087 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.518344 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.523094 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.539331 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.539855 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8ht27"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.540431 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.542378 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.542875 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.547150 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605619 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1540bb-bd69-4f44-ac02-8da0575056e1-serving-cert\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605705 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05b134e2-b96d-4a00-9681-6c8ce017bc74-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605762 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605815 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dfbafd02-2fcf-4a25-a454-ade91c336036-metrics-tls\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605850 4925 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmd77\" (UniqueName: \"kubernetes.io/projected/b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9-kube-api-access-cmd77\") pod \"cluster-samples-operator-665b6dd947-gqsng\" (UID: \"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605879 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jwhj\" (UniqueName: \"kubernetes.io/projected/3dddbbd4-eb3f-436d-8c53-cf413cecca31-kube-api-access-9jwhj\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605917 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-encryption-config\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605952 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-audit-dir\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.605990 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-trusted-ca-bundle\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606027 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6zm6\" (UniqueName: \"kubernetes.io/projected/b4eed50b-ef22-4637-9aa1-d8528310aed1-kube-api-access-c6zm6\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606067 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbjc8\" (UniqueName: \"kubernetes.io/projected/05b134e2-b96d-4a00-9681-6c8ce017bc74-kube-api-access-hbjc8\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606109 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/81742698-2b71-46b2-93fa-1552cfa27f8a-auth-proxy-config\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606155 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-qlq2b\" (UniqueName: \"kubernetes.io/projected/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-kube-api-access-qlq2b\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606187 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/78713d9a-139c-4d4e-8068-a0d0d98b86df-serving-cert\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606213 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-config\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606308 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-serving-cert\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606348 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a264bb6-3e63-4411-b0a4-95be21527653-config\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606374 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606432 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606481 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gqsng\" (UID: \"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606507 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-config\") pod \"controller-manager-879f6c89f-dz6wr\" 
(UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606555 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hlcq\" (UniqueName: \"kubernetes.io/projected/890e3b6e-bd8d-438c-992b-508bb751bdca-kube-api-access-9hlcq\") pod \"downloads-7954f5f757-vw8cb\" (UID: \"890e3b6e-bd8d-438c-992b-508bb751bdca\") " pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606592 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-config\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606614 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9b96df7-05d7-4cd0-9e30-c5e485f31804-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606655 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbafd02-2fcf-4a25-a454-ade91c336036-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606696 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5a264bb6-3e63-4411-b0a4-95be21527653-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606725 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-oauth-config\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606749 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-trusted-ca-bundle\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606777 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-audit-policies\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606824 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks5r5\" (UniqueName: \"kubernetes.io/projected/5ad56032-3192-4b42-b7ca-a8c3bac978d1-kube-api-access-ks5r5\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606863 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5a264bb6-3e63-4411-b0a4-95be21527653-images\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606891 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-service-ca-bundle\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606914 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-config\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.606978 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607004 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/78713d9a-139c-4d4e-8068-a0d0d98b86df-trusted-ca\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607026 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-client-ca\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607045 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607075 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607112 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607137 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs25v\" (UniqueName: \"kubernetes.io/projected/78713d9a-139c-4d4e-8068-a0d0d98b86df-kube-api-access-rs25v\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607168 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6405bedd-bfe2-411b-937d-8f309fc6d0e8-proxy-tls\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607189 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jshq\" (UniqueName: \"kubernetes.io/projected/be500af9-e814-41db-be2a-e4f3fa9d46bb-kube-api-access-5jshq\") pod \"dns-operator-744455d44c-kk7wd\" (UID: \"be500af9-e814-41db-be2a-e4f3fa9d46bb\") " pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607215 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607237 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-serving-cert\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607258 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp55x\" (UniqueName: \"kubernetes.io/projected/7cbece9b-282a-4634-b41d-85f872a5be93-kube-api-access-mp55x\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607275 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5ad56032-3192-4b42-b7ca-a8c3bac978d1-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607298 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/91b59e6a-27bf-49a6-99c0-cb20160980ac-images\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607317 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/91b59e6a-27bf-49a6-99c0-cb20160980ac-proxy-tls\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607342 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn679\" (UniqueName: \"kubernetes.io/projected/5a264bb6-3e63-4411-b0a4-95be21527653-kube-api-access-dn679\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607380 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wz4nd\" (UniqueName: \"kubernetes.io/projected/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-kube-api-access-wz4nd\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607424 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v54mn\" (UniqueName: \"kubernetes.io/projected/91b59e6a-27bf-49a6-99c0-cb20160980ac-kube-api-access-v54mn\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607460 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607480 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/81742698-2b71-46b2-93fa-1552cfa27f8a-machine-approver-tls\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607503 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-service-ca-bundle\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607532 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-stats-auth\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607559 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/be500af9-e814-41db-be2a-e4f3fa9d46bb-metrics-tls\") pod \"dns-operator-744455d44c-kk7wd\" (UID: \"be500af9-e814-41db-be2a-e4f3fa9d46bb\") " pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607591 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607622 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607661 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81742698-2b71-46b2-93fa-1552cfa27f8a-config\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607707 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5ad56032-3192-4b42-b7ca-a8c3bac978d1-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607742 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbl6s\" (UniqueName: \"kubernetes.io/projected/6405bedd-bfe2-411b-937d-8f309fc6d0e8-kube-api-access-tbl6s\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607767 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3dddbbd4-eb3f-436d-8c53-cf413cecca31-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607800 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cbece9b-282a-4634-b41d-85f872a5be93-serving-cert\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607834 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-serving-cert\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607861 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc99b\" (UniqueName: \"kubernetes.io/projected/81742698-2b71-46b2-93fa-1552cfa27f8a-kube-api-access-cc99b\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607892 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-config\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607921 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-audit\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607949 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.607978 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/91b59e6a-27bf-49a6-99c0-cb20160980ac-auth-proxy-config\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608006 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-trusted-ca-bundle\") pod 
\"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608029 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3dddbbd4-eb3f-436d-8c53-cf413cecca31-serving-cert\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608060 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqqnn\" (UniqueName: \"kubernetes.io/projected/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-kube-api-access-qqqnn\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608109 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-image-import-ca\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608179 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-668sl\" (UniqueName: \"kubernetes.io/projected/59445cd0-2391-49e1-9a4e-6ca280c8ab85-kube-api-access-668sl\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608208 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-dir\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608236 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-serving-cert\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608261 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tkqj\" (UniqueName: \"kubernetes.io/projected/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-kube-api-access-8tkqj\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608286 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfbafd02-2fcf-4a25-a454-ade91c336036-trusted-ca\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608311 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5ad56032-3192-4b42-b7ca-a8c3bac978d1-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608337 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e0376dda-f02a-464e-ae41-18d6fddd7097-node-pullsecrets\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608362 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.610132 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.613365 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.624598 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.627603 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.628084 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.608390 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-default-certificate\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631463 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-oauth-serving-cert\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631541 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9b96df7-05d7-4cd0-9e30-c5e485f31804-config\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631644 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4np7\" (UniqueName: \"kubernetes.io/projected/8f1540bb-bd69-4f44-ac02-8da0575056e1-kube-api-access-j4np7\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631714 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-metrics-certs\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631771 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-encryption-config\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631803 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-service-ca\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631831 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631854 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4s82q\" (UniqueName: \"kubernetes.io/projected/dfbafd02-2fcf-4a25-a454-ade91c336036-kube-api-access-4s82q\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631883 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6405bedd-bfe2-411b-937d-8f309fc6d0e8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631927 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hd5c\" (UniqueName: \"kubernetes.io/projected/e0376dda-f02a-464e-ae41-18d6fddd7097-kube-api-access-6hd5c\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.631954 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-client\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632009 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-serving-cert\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632044 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632077 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-client-ca\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632103 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-etcd-client\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632133 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-etcd-serving-ca\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632161 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e0376dda-f02a-464e-ae41-18d6fddd7097-audit-dir\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632189 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-etcd-client\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632220 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-config\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632249 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-ca\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632277 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-service-ca\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632307 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78713d9a-139c-4d4e-8068-a0d0d98b86df-config\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632337 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05b134e2-b96d-4a00-9681-6c8ce017bc74-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632365 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vvzw\" (UniqueName: \"kubernetes.io/projected/b9b96df7-05d7-4cd0-9e30-c5e485f31804-kube-api-access-8vvzw\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632412 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tt9vp\" (UniqueName: \"kubernetes.io/projected/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-kube-api-access-tt9vp\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632443 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632477 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-policies\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632504 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632529 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9b96df7-05d7-4cd0-9e30-c5e485f31804-config\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632533 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.632603 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqs9d\" (UniqueName: \"kubernetes.io/projected/68968bee-6187-43fa-bad4-ab1eb83e9c68-kube-api-access-jqs9d\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.633813 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-oauth-serving-cert\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 
10:58:12.636936 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.637485 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.637654 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.637921 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.638329 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.638592 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-dir\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.640726 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.642030 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.644325 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.645181 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.652195 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-config\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.661722 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-audit-dir\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.663285 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e0376dda-f02a-464e-ae41-18d6fddd7097-node-pullsecrets\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 
10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.663563 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-etcd-serving-ca\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.664646 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-image-import-ca\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.670924 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.671110 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-serving-cert\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.696811 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-encryption-config\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.705767 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-service-ca\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.706361 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.707375 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9b96df7-05d7-4cd0-9e30-c5e485f31804-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.716835 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-etcd-client\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " 
pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.717090 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/78713d9a-139c-4d4e-8068-a0d0d98b86df-trusted-ca\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.718083 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/81742698-2b71-46b2-93fa-1552cfa27f8a-machine-approver-tls\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.719712 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-serving-cert\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.720113 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-encryption-config\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.720663 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.722251 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81742698-2b71-46b2-93fa-1552cfa27f8a-config\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.722445 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5a264bb6-3e63-4411-b0a4-95be21527653-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.723321 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a264bb6-3e63-4411-b0a4-95be21527653-config\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.723500 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-config\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.724181 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-client-ca\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.726065 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/78713d9a-139c-4d4e-8068-a0d0d98b86df-serving-cert\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.727134 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5a264bb6-3e63-4411-b0a4-95be21527653-images\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.727661 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-config\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.730217 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gqsng\" (UID: \"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.730272 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e0376dda-f02a-464e-ae41-18d6fddd7097-audit-dir\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.731750 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.733514 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc 
kubenswrapper[4925]: I0121 10:58:12.733695 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.733776 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.734643 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e0376dda-f02a-464e-ae41-18d6fddd7097-serving-cert\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.735200 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78713d9a-139c-4d4e-8068-a0d0d98b86df-config\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.735930 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-policies\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.737523 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-trusted-ca-bundle\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.738610 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.739786 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.739886 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.739913 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-client-ca\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: 
\"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740336 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-config\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740370 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.744764 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.744815 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/81742698-2b71-46b2-93fa-1552cfa27f8a-auth-proxy-config\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741655 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-audit-policies\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741784 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6405bedd-bfe2-411b-937d-8f309fc6d0e8-proxy-tls\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.744939 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp55x\" (UniqueName: \"kubernetes.io/projected/7cbece9b-282a-4634-b41d-85f872a5be93-kube-api-access-mp55x\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.744964 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5ad56032-3192-4b42-b7ca-a8c3bac978d1-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745314 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/be500af9-e814-41db-be2a-e4f3fa9d46bb-metrics-tls\") pod \"dns-operator-744455d44c-kk7wd\" (UID: \"be500af9-e814-41db-be2a-e4f3fa9d46bb\") " pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745352 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5ad56032-3192-4b42-b7ca-a8c3bac978d1-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745405 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbl6s\" (UniqueName: \"kubernetes.io/projected/6405bedd-bfe2-411b-937d-8f309fc6d0e8-kube-api-access-tbl6s\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745434 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-config\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745455 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3dddbbd4-eb3f-436d-8c53-cf413cecca31-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745475 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cbece9b-282a-4634-b41d-85f872a5be93-serving-cert\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745551 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqqnn\" (UniqueName: \"kubernetes.io/projected/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-kube-api-access-qqqnn\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745578 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-serving-cert\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745603 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tkqj\" (UniqueName: \"kubernetes.io/projected/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-kube-api-access-8tkqj\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745622 4925 
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745622 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5ad56032-3192-4b42-b7ca-a8c3bac978d1-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745648 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-default-certificate\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745682 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-metrics-certs\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745709 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745727 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4s82q\" (UniqueName: \"kubernetes.io/projected/dfbafd02-2fcf-4a25-a454-ade91c336036-kube-api-access-4s82q\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745752 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-ca\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745769 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-service-ca\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745798 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tb4z8\" (UniqueName: \"kubernetes.io/projected/dffa6415-1a36-41b0-9919-a04bea0bdff8-kube-api-access-tb4z8\") pod \"migrator-59844c95c7-hfk49\" (UID: \"dffa6415-1a36-41b0-9919-a04bea0bdff8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49"
\"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745826 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745845 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745873 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745895 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jwhj\" (UniqueName: \"kubernetes.io/projected/3dddbbd4-eb3f-436d-8c53-cf413cecca31-kube-api-access-9jwhj\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745921 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlq2b\" (UniqueName: \"kubernetes.io/projected/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-kube-api-access-qlq2b\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.745980 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-config\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746001 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbafd02-2fcf-4a25-a454-ade91c336036-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746023 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks5r5\" (UniqueName: \"kubernetes.io/projected/5ad56032-3192-4b42-b7ca-a8c3bac978d1-kube-api-access-ks5r5\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: 
\"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746046 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-config\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746063 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746083 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jshq\" (UniqueName: \"kubernetes.io/projected/be500af9-e814-41db-be2a-e4f3fa9d46bb-kube-api-access-5jshq\") pod \"dns-operator-744455d44c-kk7wd\" (UID: \"be500af9-e814-41db-be2a-e4f3fa9d46bb\") " pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746114 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/91b59e6a-27bf-49a6-99c0-cb20160980ac-images\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746132 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/91b59e6a-27bf-49a6-99c0-cb20160980ac-proxy-tls\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746164 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v54mn\" (UniqueName: \"kubernetes.io/projected/91b59e6a-27bf-49a6-99c0-cb20160980ac-kube-api-access-v54mn\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746183 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-service-ca-bundle\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746203 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-stats-auth\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:12 crc kubenswrapper[4925]: 
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746264 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/91b59e6a-27bf-49a6-99c0-cb20160980ac-auth-proxy-config\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746281 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3dddbbd4-eb3f-436d-8c53-cf413cecca31-serving-cert\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746311 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfbafd02-2fcf-4a25-a454-ade91c336036-trusted-ca\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746356 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6405bedd-bfe2-411b-937d-8f309fc6d0e8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746405 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-client\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746482 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05b134e2-b96d-4a00-9681-6c8ce017bc74-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746517 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqs9d\" (UniqueName: \"kubernetes.io/projected/68968bee-6187-43fa-bad4-ab1eb83e9c68-kube-api-access-jqs9d\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27"
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.746540 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05b134e2-b96d-4a00-9681-6c8ce017bc74-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd"
source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-m7dl4"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.747079 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.747585 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.748049 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740743 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-serving-cert\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.749935 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dfbafd02-2fcf-4a25-a454-ade91c336036-metrics-tls\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.750052 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbjc8\" (UniqueName: \"kubernetes.io/projected/05b134e2-b96d-4a00-9681-6c8ce017bc74-kube-api-access-hbjc8\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.750146 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-service-ca-bundle\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.750186 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.750217 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.743004 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.751353 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-oauth-config\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.751962 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.752574 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1540bb-bd69-4f44-ac02-8da0575056e1-serving-cert\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.754349 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6405bedd-bfe2-411b-937d-8f309fc6d0e8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.755595 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3dddbbd4-eb3f-436d-8c53-cf413cecca31-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.756156 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/91b59e6a-27bf-49a6-99c0-cb20160980ac-auth-proxy-config\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.760023 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-trusted-ca-bundle\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.760970 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-service-ca-bundle\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.761857 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.762565 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-ca\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.764648 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bmpxp"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.764851 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-4kjnr"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.765774 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/be500af9-e814-41db-be2a-e4f3fa9d46bb-metrics-tls\") pod \"dns-operator-744455d44c-kk7wd\" (UID: \"be500af9-e814-41db-be2a-e4f3fa9d46bb\") " pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.766447 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.766930 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.767794 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.768040 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-service-ca\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.768107 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3dddbbd4-eb3f-436d-8c53-cf413cecca31-serving-cert\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740678 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e0376dda-f02a-464e-ae41-18d6fddd7097-audit\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.739979 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.768454 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.768604 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740031 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.768843 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740082 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740088 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740165 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.770732 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cbece9b-282a-4634-b41d-85f872a5be93-config\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740199 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.770785 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cbece9b-282a-4634-b41d-85f872a5be93-serving-cert\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740252 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740255 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740554 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740615 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.740904 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741043 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741128 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741179 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741231 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741274 4925 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.771447 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.771483 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-config\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741354 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741506 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741545 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741565 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741585 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741653 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741739 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.741788 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.772098 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.772138 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.772264 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.772541 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.772613 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.772841 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.775745 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.775778 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k9srb"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.776525 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.776861 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5ad56032-3192-4b42-b7ca-a8c3bac978d1-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.776992 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.777367 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.780047 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-kk7wd"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.780076 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.782543 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.783662 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-serving-cert\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.783774 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-etcd-client\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.783851 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.801996 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.810457 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bhfrw"] Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.810716 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.988799 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-config\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.989036 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tb4z8\" (UniqueName: \"kubernetes.io/projected/dffa6415-1a36-41b0-9919-a04bea0bdff8-kube-api-access-tb4z8\") pod \"migrator-59844c95c7-hfk49\" (UID: \"dffa6415-1a36-41b0-9919-a04bea0bdff8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.989070 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.989206 4925 
Jan 21 10:58:12 crc kubenswrapper[4925]: I0121 10:58:12.989206 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:12.997207 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.015807 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.016542 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.016255 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.018198 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.018515 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.019109 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.019341 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.020075 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.020187 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.022731 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd"]
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.029510 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-service-ca-bundle\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.035166 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.042538 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
(UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.047331 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.049480 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05b134e2-b96d-4a00-9681-6c8ce017bc74-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.073105 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-stats-auth\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.074118 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5ad56032-3192-4b42-b7ca-a8c3bac978d1-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.075780 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.076663 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.077805 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-metrics-certs\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.078284 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.079549 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-default-certificate\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.080466 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05b134e2-b96d-4a00-9681-6c8ce017bc74-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.080674 4925 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.083008 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.084082 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.086786 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.089118 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-85pbp"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.089658 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-85pbp" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.090031 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.091791 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k9srb\" (UID: \"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.091854 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c93dc177-affe-4232-9b28-fd8006418818-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.091891 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5dccce36-49ce-4eea-ac64-60faf9ba2e04-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.091918 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dccce36-49ce-4eea-ac64-60faf9ba2e04-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.091990 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e265c5ca-d8d4-4ba0-81db-fd48d3974762-config\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.092291 
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.092291 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmj4l\" (UniqueName: \"kubernetes.io/projected/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-kube-api-access-gmj4l\") pod \"multus-admission-controller-857f4d67dd-k9srb\" (UID: \"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.093024 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.093206 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-profile-collector-cert\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.093270 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c93dc177-affe-4232-9b28-fd8006418818-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.093412 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcwsw\" (UniqueName: \"kubernetes.io/projected/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-kube-api-access-bcwsw\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.093460 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqfds\" (UniqueName: \"kubernetes.io/projected/e265c5ca-d8d4-4ba0-81db-fd48d3974762-kube-api-access-wqfds\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.093596 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-srv-cert\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4"
Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.093765 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e265c5ca-d8d4-4ba0-81db-fd48d3974762-serving-cert\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2"
\"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.094064 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlnz8\" (UniqueName: \"kubernetes.io/projected/c98e3838-5bee-44ee-8fca-b5b429cef61e-kube-api-access-jlnz8\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.094238 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c93dc177-affe-4232-9b28-fd8006418818-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.094298 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/434007d9-38af-49cd-a16f-09c87531b8c1-signing-key\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.094555 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dccce36-49ce-4eea-ac64-60faf9ba2e04-config\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.094833 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-profile-collector-cert\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.094856 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-apiservice-cert\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.094880 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glttp\" (UniqueName: \"kubernetes.io/projected/be085cb8-85ab-409e-a8cb-3d02cd7153f6-kube-api-access-glttp\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.098474 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/434007d9-38af-49cd-a16f-09c87531b8c1-signing-cabundle\") pod 
\"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.098765 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-srv-cert\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.098853 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g2dw\" (UniqueName: \"kubernetes.io/projected/434007d9-38af-49cd-a16f-09c87531b8c1-kube-api-access-8g2dw\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.098889 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-webhook-cert\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.099811 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.100864 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfbafd02-2fcf-4a25-a454-ade91c336036-trusted-ca\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.102366 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dfbafd02-2fcf-4a25-a454-ade91c336036-metrics-tls\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.103913 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.104963 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.106612 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.107792 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8ht27"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.109216 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-pxkk7"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.113571 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/91b59e6a-27bf-49a6-99c0-cb20160980ac-images\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.115969 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-7lrsj"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.117432 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.118991 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.121208 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.122353 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.122786 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4d577"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.123743 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.125093 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vwhv9"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.126213 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-b7tzs"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.127499 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.131078 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-vw8cb"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.132173 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.133371 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.134561 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-m7dl4"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.135821 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.137024 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qrrl6"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.139924 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-vz4nw"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.140632 4925 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.141305 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-2hltc"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.141797 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.142456 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.142579 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.143643 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.144708 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.145846 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bhfrw"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.146867 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.147984 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k9srb"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.149153 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.150162 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-85pbp"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.151361 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.152964 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qrrl6"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.155190 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vz4nw"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.155207 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.156622 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.161448 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-2txwq"] Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.177881 
4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 21 10:58:13 crc kubenswrapper[4925]: W0121 10:58:13.180098 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c3596d1_1f08_4703_ab63_c29358aac0d9.slice/crio-1201133667aae405ce43744b939e5f2b5b1bbddab2d0311e54ba877ccb7af818 WatchSource:0}: Error finding container 1201133667aae405ce43744b939e5f2b5b1bbddab2d0311e54ba877ccb7af818: Status 404 returned error can't find the container with id 1201133667aae405ce43744b939e5f2b5b1bbddab2d0311e54ba877ccb7af818 Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.181325 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.186386 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200182 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c93dc177-affe-4232-9b28-fd8006418818-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200247 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/434007d9-38af-49cd-a16f-09c87531b8c1-signing-key\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200350 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dccce36-49ce-4eea-ac64-60faf9ba2e04-config\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200388 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-profile-collector-cert\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200450 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-apiservice-cert\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200485 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glttp\" (UniqueName: 
\"kubernetes.io/projected/be085cb8-85ab-409e-a8cb-3d02cd7153f6-kube-api-access-glttp\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200628 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/434007d9-38af-49cd-a16f-09c87531b8c1-signing-cabundle\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200725 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-srv-cert\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200802 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g2dw\" (UniqueName: \"kubernetes.io/projected/434007d9-38af-49cd-a16f-09c87531b8c1-kube-api-access-8g2dw\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200828 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-webhook-cert\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200894 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5dccce36-49ce-4eea-ac64-60faf9ba2e04-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200949 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k9srb\" (UID: \"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.200985 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c93dc177-affe-4232-9b28-fd8006418818-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201039 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dccce36-49ce-4eea-ac64-60faf9ba2e04-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201077 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e265c5ca-d8d4-4ba0-81db-fd48d3974762-config\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201215 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmj4l\" (UniqueName: \"kubernetes.io/projected/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-kube-api-access-gmj4l\") pod \"multus-admission-controller-857f4d67dd-k9srb\" (UID: \"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201305 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-profile-collector-cert\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201387 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c93dc177-affe-4232-9b28-fd8006418818-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201602 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcwsw\" (UniqueName: \"kubernetes.io/projected/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-kube-api-access-bcwsw\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201722 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqfds\" (UniqueName: \"kubernetes.io/projected/e265c5ca-d8d4-4ba0-81db-fd48d3974762-kube-api-access-wqfds\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201788 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-srv-cert\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.201883 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/be085cb8-85ab-409e-a8cb-3d02cd7153f6-tmpfs\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 
10:58:13.201969 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e265c5ca-d8d4-4ba0-81db-fd48d3974762-serving-cert\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.202145 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlnz8\" (UniqueName: \"kubernetes.io/projected/c98e3838-5bee-44ee-8fca-b5b429cef61e-kube-api-access-jlnz8\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.202587 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/be085cb8-85ab-409e-a8cb-3d02cd7153f6-tmpfs\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.203341 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.223152 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.233907 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/91b59e6a-27bf-49a6-99c0-cb20160980ac-proxy-tls\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.241172 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.261697 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.265886 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6405bedd-bfe2-411b-937d-8f309fc6d0e8-proxy-tls\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.281468 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.290660 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-config\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.302062 4925 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.322543 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.360344 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs25v\" (UniqueName: \"kubernetes.io/projected/78713d9a-139c-4d4e-8068-a0d0d98b86df-kube-api-access-rs25v\") pod \"console-operator-58897d9998-4d577\" (UID: \"78713d9a-139c-4d4e-8068-a0d0d98b86df\") " pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.361488 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.380713 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.418326 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn679\" (UniqueName: \"kubernetes.io/projected/5a264bb6-3e63-4411-b0a4-95be21527653-kube-api-access-dn679\") pod \"machine-api-operator-5694c8668f-bmpxp\" (UID: \"5a264bb6-3e63-4411-b0a4-95be21527653\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.695125 4925 request.go:700] Waited for 1.042551934s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-machine-approver/serviceaccounts/machine-approver-sa/token Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.697411 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.698241 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.700705 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.722862 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.726611 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mp55x\" (UniqueName: \"kubernetes.io/projected/7cbece9b-282a-4634-b41d-85f872a5be93-kube-api-access-mp55x\") pod \"authentication-operator-69f744f599-4kjnr\" (UID: \"7cbece9b-282a-4634-b41d-85f872a5be93\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.728909 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wz4nd\" (UniqueName: \"kubernetes.io/projected/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-kube-api-access-wz4nd\") pod \"route-controller-manager-6576b87f9c-nrk92\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.731873 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmd77\" (UniqueName: \"kubernetes.io/projected/b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9-kube-api-access-cmd77\") pod \"cluster-samples-operator-665b6dd947-gqsng\" (UID: \"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.732295 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.732328 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc99b\" (UniqueName: \"kubernetes.io/projected/81742698-2b71-46b2-93fa-1552cfa27f8a-kube-api-access-cc99b\") pod \"machine-approver-56656f9798-g66gm\" (UID: \"81742698-2b71-46b2-93fa-1552cfa27f8a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.732531 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vvzw\" (UniqueName: \"kubernetes.io/projected/b9b96df7-05d7-4cd0-9e30-c5e485f31804-kube-api-access-8vvzw\") pod \"openshift-apiserver-operator-796bbdcf4f-77kwx\" (UID: \"b9b96df7-05d7-4cd0-9e30-c5e485f31804\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.733641 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4np7\" (UniqueName: \"kubernetes.io/projected/8f1540bb-bd69-4f44-ac02-8da0575056e1-kube-api-access-j4np7\") pod \"controller-manager-879f6c89f-dz6wr\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.736085 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.737187 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6zm6\" (UniqueName: \"kubernetes.io/projected/b4eed50b-ef22-4637-9aa1-d8528310aed1-kube-api-access-c6zm6\") pod \"oauth-openshift-558db77b4-vwhv9\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.737486 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hlcq\" (UniqueName: \"kubernetes.io/projected/890e3b6e-bd8d-438c-992b-508bb751bdca-kube-api-access-9hlcq\") pod \"downloads-7954f5f757-vw8cb\" (UID: \"890e3b6e-bd8d-438c-992b-508bb751bdca\") " pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.738541 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tt9vp\" (UniqueName: \"kubernetes.io/projected/5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605-kube-api-access-tt9vp\") pod \"apiserver-7bbb656c7d-dnjzx\" (UID: \"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.740085 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hd5c\" (UniqueName: \"kubernetes.io/projected/e0376dda-f02a-464e-ae41-18d6fddd7097-kube-api-access-6hd5c\") pod \"apiserver-76f77b778f-pxkk7\" (UID: \"e0376dda-f02a-464e-ae41-18d6fddd7097\") " pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.742654 4925 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.744219 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-668sl\" (UniqueName: \"kubernetes.io/projected/59445cd0-2391-49e1-9a4e-6ca280c8ab85-kube-api-access-668sl\") pod \"console-f9d7485db-7lrsj\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.746509 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5ad56032-3192-4b42-b7ca-a8c3bac978d1-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.749596 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jshq\" (UniqueName: \"kubernetes.io/projected/be500af9-e814-41db-be2a-e4f3fa9d46bb-kube-api-access-5jshq\") pod \"dns-operator-744455d44c-kk7wd\" (UID: \"be500af9-e814-41db-be2a-e4f3fa9d46bb\") " pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.750478 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.759607 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v54mn\" (UniqueName: \"kubernetes.io/projected/91b59e6a-27bf-49a6-99c0-cb20160980ac-kube-api-access-v54mn\") pod \"machine-config-operator-74547568cd-q4cqt\" (UID: \"91b59e6a-27bf-49a6-99c0-cb20160980ac\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.781588 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbl6s\" (UniqueName: \"kubernetes.io/projected/6405bedd-bfe2-411b-937d-8f309fc6d0e8-kube-api-access-tbl6s\") pod \"machine-config-controller-84d6567774-2d2sj\" (UID: \"6405bedd-bfe2-411b-937d-8f309fc6d0e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.794309 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-2txwq" event={"ID":"5c3596d1-1f08-4703-ab63-c29358aac0d9","Type":"ContainerStarted","Data":"1201133667aae405ce43744b939e5f2b5b1bbddab2d0311e54ba877ccb7af818"} Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.804928 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4s82q\" (UniqueName: \"kubernetes.io/projected/dfbafd02-2fcf-4a25-a454-ade91c336036-kube-api-access-4s82q\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.814704 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.841957 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.852702 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.882520 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.884066 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqqnn\" (UniqueName: \"kubernetes.io/projected/5cb8f784-0f6a-43c7-a37a-d7f65668af7a-kube-api-access-qqqnn\") pod \"openshift-controller-manager-operator-756b6f6bc6-jqrcn\" (UID: \"5cb8f784-0f6a-43c7-a37a-d7f65668af7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.888174 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tkqj\" (UniqueName: \"kubernetes.io/projected/cfd1ca7f-583a-40a7-a485-fb01c60d77c6-kube-api-access-8tkqj\") pod \"etcd-operator-b45778765-b7tzs\" (UID: \"cfd1ca7f-583a-40a7-a485-fb01c60d77c6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.889409 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqs9d\" (UniqueName: \"kubernetes.io/projected/68968bee-6187-43fa-bad4-ab1eb83e9c68-kube-api-access-jqs9d\") pod \"marketplace-operator-79b997595-8ht27\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:13 crc kubenswrapper[4925]: I0121 10:58:13.893002 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbjc8\" (UniqueName: \"kubernetes.io/projected/05b134e2-b96d-4a00-9681-6c8ce017bc74-kube-api-access-hbjc8\") pod \"kube-storage-version-migrator-operator-b67b599dd-w2zsd\" (UID: \"05b134e2-b96d-4a00-9681-6c8ce017bc74\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.023025 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.023845 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.027191 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.028033 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.028267 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.028440 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.028624 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.028802 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.031456 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.031825 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.032583 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.032660 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c93dc177-affe-4232-9b28-fd8006418818-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.032951 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.033185 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.033265 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.033909 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.042539 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.052447 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c93dc177-affe-4232-9b28-fd8006418818-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.063021 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.377811 4925 secret.go:188] Couldn't get secret openshift-multus/multus-admission-controller-secret: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.377908 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-webhook-certs podName:bebe6bc4-7b86-4688-ab28-408d5fc1ed7e nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.877879845 +0000 UTC m=+186.481771779 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-webhook-certs") pod "multus-admission-controller-857f4d67dd-k9srb" (UID: "bebe6bc4-7b86-4688-ab28-408d5fc1ed7e") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378417 4925 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378448 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-profile-collector-cert podName:b22e36ec-37f7-4d2c-87fb-ce56d5436a8d nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.878441004 +0000 UTC m=+186.482332938 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-profile-collector-cert") pod "catalog-operator-68c6474976-krz9k" (UID: "b22e36ec-37f7-4d2c-87fb-ce56d5436a8d") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378603 4925 secret.go:188] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378650 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/434007d9-38af-49cd-a16f-09c87531b8c1-signing-key podName:434007d9-38af-49cd-a16f-09c87531b8c1 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.878643911 +0000 UTC m=+186.482535845 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/434007d9-38af-49cd-a16f-09c87531b8c1-signing-key") pod "service-ca-9c57cc56f-bhfrw" (UID: "434007d9-38af-49cd-a16f-09c87531b8c1") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378666 4925 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378683 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-apiservice-cert podName:be085cb8-85ab-409e-a8cb-3d02cd7153f6 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.878678962 +0000 UTC m=+186.482570896 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-apiservice-cert") pod "packageserver-d55dfcdfc-5l4bq" (UID: "be085cb8-85ab-409e-a8cb-3d02cd7153f6") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378738 4925 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378765 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/434007d9-38af-49cd-a16f-09c87531b8c1-signing-cabundle podName:434007d9-38af-49cd-a16f-09c87531b8c1 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.878759965 +0000 UTC m=+186.482651899 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/434007d9-38af-49cd-a16f-09c87531b8c1-signing-cabundle") pod "service-ca-9c57cc56f-bhfrw" (UID: "434007d9-38af-49cd-a16f-09c87531b8c1") : failed to sync configmap cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378782 4925 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378803 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-webhook-cert podName:be085cb8-85ab-409e-a8cb-3d02cd7153f6 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.878798566 +0000 UTC m=+186.482690500 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-webhook-cert") pod "packageserver-d55dfcdfc-5l4bq" (UID: "be085cb8-85ab-409e-a8cb-3d02cd7153f6") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378820 4925 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378839 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-profile-collector-cert podName:c98e3838-5bee-44ee-8fca-b5b429cef61e nodeName:}" failed. 
No retries permitted until 2026-01-21 10:58:14.878834537 +0000 UTC m=+186.482726471 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-profile-collector-cert") pod "olm-operator-6b444d44fb-z9nt4" (UID: "c98e3838-5bee-44ee-8fca-b5b429cef61e") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378857 4925 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.378878 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-srv-cert podName:c98e3838-5bee-44ee-8fca-b5b429cef61e nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.878873459 +0000 UTC m=+186.482765383 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-srv-cert") pod "olm-operator-6b444d44fb-z9nt4" (UID: "c98e3838-5bee-44ee-8fca-b5b429cef61e") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.379017 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.382272 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.382897 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383128 4925 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383235 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-srv-cert podName:b22e36ec-37f7-4d2c-87fb-ce56d5436a8d nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.883205612 +0000 UTC m=+186.487097546 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-srv-cert") pod "catalog-operator-68c6474976-krz9k" (UID: "b22e36ec-37f7-4d2c-87fb-ce56d5436a8d") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383322 4925 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383431 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e265c5ca-d8d4-4ba0-81db-fd48d3974762-config podName:e265c5ca-d8d4-4ba0-81db-fd48d3974762 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.883382098 +0000 UTC m=+186.487274202 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e265c5ca-d8d4-4ba0-81db-fd48d3974762-config") pod "service-ca-operator-777779d784-ntfc2" (UID: "e265c5ca-d8d4-4ba0-81db-fd48d3974762") : failed to sync configmap cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383473 4925 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383499 4925 configmap.go:193] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383527 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e265c5ca-d8d4-4ba0-81db-fd48d3974762-serving-cert podName:e265c5ca-d8d4-4ba0-81db-fd48d3974762 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.883518382 +0000 UTC m=+186.487410536 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e265c5ca-d8d4-4ba0-81db-fd48d3974762-serving-cert") pod "service-ca-operator-777779d784-ntfc2" (UID: "e265c5ca-d8d4-4ba0-81db-fd48d3974762") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383555 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5dccce36-49ce-4eea-ac64-60faf9ba2e04-config podName:5dccce36-49ce-4eea-ac64-60faf9ba2e04 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.883543693 +0000 UTC m=+186.487435857 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/5dccce36-49ce-4eea-ac64-60faf9ba2e04-config") pod "kube-apiserver-operator-766d6c64bb-5ml9s" (UID: "5dccce36-49ce-4eea-ac64-60faf9ba2e04") : failed to sync configmap cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383564 4925 secret.go:188] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: E0121 10:58:14.383638 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5dccce36-49ce-4eea-ac64-60faf9ba2e04-serving-cert podName:5dccce36-49ce-4eea-ac64-60faf9ba2e04 nodeName:}" failed. No retries permitted until 2026-01-21 10:58:14.883615726 +0000 UTC m=+186.487507660 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5dccce36-49ce-4eea-ac64-60faf9ba2e04-serving-cert") pod "kube-apiserver-operator-766d6c64bb-5ml9s" (UID: "5dccce36-49ce-4eea-ac64-60faf9ba2e04") : failed to sync secret cache: timed out waiting for the condition Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.383830 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.388671 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.412241 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.412712 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.413038 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.413211 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.413362 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.413475 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.413619 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.413748 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.413849 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.413958 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.414084 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.454665 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jwhj\" (UniqueName: \"kubernetes.io/projected/3dddbbd4-eb3f-436d-8c53-cf413cecca31-kube-api-access-9jwhj\") pod \"openshift-config-operator-7777fb866f-9fbhr\" (UID: \"3dddbbd4-eb3f-436d-8c53-cf413cecca31\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.468902 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlq2b\" (UniqueName: \"kubernetes.io/projected/fb3fdc07-c6f5-4330-8b00-e454c98ef11d-kube-api-access-qlq2b\") pod \"router-default-5444994796-n2k47\" (UID: \"fb3fdc07-c6f5-4330-8b00-e454c98ef11d\") " pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.574479 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.574885 4925 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.575031 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.575286 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.575525 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.577179 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.577616 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.584556 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbafd02-2fcf-4a25-a454-ade91c336036-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4kbqk\" (UID: \"dfbafd02-2fcf-4a25-a454-ade91c336036\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.589461 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.594930 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks5r5\" (UniqueName: \"kubernetes.io/projected/5ad56032-3192-4b42-b7ca-a8c3bac978d1-kube-api-access-ks5r5\") pod \"cluster-image-registry-operator-dc59b4c8b-cq2j2\" (UID: \"5ad56032-3192-4b42-b7ca-a8c3bac978d1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.600510 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ce7cc91-68ce-4bcc-99a0-436380c8a2e8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-28hkz\" (UID: \"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.600563 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tb4z8\" (UniqueName: \"kubernetes.io/projected/dffa6415-1a36-41b0-9919-a04bea0bdff8-kube-api-access-tb4z8\") pod \"migrator-59844c95c7-hfk49\" (UID: \"dffa6415-1a36-41b0-9919-a04bea0bdff8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.602862 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.608855 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.620065 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.622574 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.663214 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.683179 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.869854 4925 request.go:700] Waited for 1.72765442s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns/configmaps?fieldSelector=metadata.name%3Ddns-default&limit=500&resourceVersion=0 Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.869973 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.870170 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.870359 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.871258 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.875111 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.875341 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.875809 4925 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.879810 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.880233 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.880517 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.881339 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.881434 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.881857 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 
10:58:14.886686 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.892298 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-2txwq" event={"ID":"5c3596d1-1f08-4703-ab63-c29358aac0d9","Type":"ContainerStarted","Data":"0dfd8b675bb6b898cd91b2437844b61903004113ba771ae5915fb3801fea8984"} Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.902781 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.925971 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.991861 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dccce36-49ce-4eea-ac64-60faf9ba2e04-config\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.991963 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-profile-collector-cert\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.991990 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-apiservice-cert\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992067 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/434007d9-38af-49cd-a16f-09c87531b8c1-signing-cabundle\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992102 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-srv-cert\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992126 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-webhook-cert\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992164 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k9srb\" (UID: \"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992189 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dccce36-49ce-4eea-ac64-60faf9ba2e04-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992216 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e265c5ca-d8d4-4ba0-81db-fd48d3974762-config\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992267 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-profile-collector-cert\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992414 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-srv-cert\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992475 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e265c5ca-d8d4-4ba0-81db-fd48d3974762-serving-cert\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.992539 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/434007d9-38af-49cd-a16f-09c87531b8c1-signing-key\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.994324 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dccce36-49ce-4eea-ac64-60faf9ba2e04-config\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.995965 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.996348 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e265c5ca-d8d4-4ba0-81db-fd48d3974762-config\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.996355 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 21 10:58:14 crc kubenswrapper[4925]: I0121 10:58:14.997710 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/434007d9-38af-49cd-a16f-09c87531b8c1-signing-cabundle\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.007046 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-webhook-cert\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.007779 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e265c5ca-d8d4-4ba0-81db-fd48d3974762-serving-cert\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.008031 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k9srb\" (UID: \"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.013639 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-srv-cert\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.015510 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c98e3838-5bee-44ee-8fca-b5b429cef61e-profile-collector-cert\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.016019 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/434007d9-38af-49cd-a16f-09c87531b8c1-signing-key\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.016318 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-profile-collector-cert\") pod 
\"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.019903 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glttp\" (UniqueName: \"kubernetes.io/projected/be085cb8-85ab-409e-a8cb-3d02cd7153f6-kube-api-access-glttp\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.021283 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-srv-cert\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.145308 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dccce36-49ce-4eea-ac64-60faf9ba2e04-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.150636 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/be085cb8-85ab-409e-a8cb-3d02cd7153f6-apiservice-cert\") pod \"packageserver-d55dfcdfc-5l4bq\" (UID: \"be085cb8-85ab-409e-a8cb-3d02cd7153f6\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.151787 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g2dw\" (UniqueName: \"kubernetes.io/projected/434007d9-38af-49cd-a16f-09c87531b8c1-kube-api-access-8g2dw\") pod \"service-ca-9c57cc56f-bhfrw\" (UID: \"434007d9-38af-49cd-a16f-09c87531b8c1\") " pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.217846 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.675107 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.702461 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5dccce36-49ce-4eea-ac64-60faf9ba2e04-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5ml9s\" (UID: \"5dccce36-49ce-4eea-ac64-60faf9ba2e04\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.709889 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmj4l\" (UniqueName: \"kubernetes.io/projected/bebe6bc4-7b86-4688-ab28-408d5fc1ed7e-kube-api-access-gmj4l\") pod \"multus-admission-controller-857f4d67dd-k9srb\" (UID: \"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.712352 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqfds\" (UniqueName: \"kubernetes.io/projected/e265c5ca-d8d4-4ba0-81db-fd48d3974762-kube-api-access-wqfds\") pod \"service-ca-operator-777779d784-ntfc2\" (UID: \"e265c5ca-d8d4-4ba0-81db-fd48d3974762\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.715614 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c93dc177-affe-4232-9b28-fd8006418818-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jb9kj\" (UID: \"c93dc177-affe-4232-9b28-fd8006418818\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.724307 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlnz8\" (UniqueName: \"kubernetes.io/projected/c98e3838-5bee-44ee-8fca-b5b429cef61e-kube-api-access-jlnz8\") pod \"olm-operator-6b444d44fb-z9nt4\" (UID: \"c98e3838-5bee-44ee-8fca-b5b429cef61e\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.743892 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-4kjnr"] Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.756613 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcwsw\" (UniqueName: \"kubernetes.io/projected/b22e36ec-37f7-4d2c-87fb-ce56d5436a8d-kube-api-access-bcwsw\") pod \"catalog-operator-68c6474976-krz9k\" (UID: \"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.759414 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781065 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwn4h\" (UniqueName: \"kubernetes.io/projected/5c2fa6a9-ee76-4308-a8f1-095d9720c688-kube-api-access-mwn4h\") pod \"control-plane-machine-set-operator-78cbb6b69f-2fd99\" (UID: \"5c2fa6a9-ee76-4308-a8f1-095d9720c688\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781151 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5c2fa6a9-ee76-4308-a8f1-095d9720c688-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-2fd99\" (UID: \"5c2fa6a9-ee76-4308-a8f1-095d9720c688\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781197 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-bound-sa-token\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781323 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-tls\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781353 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0770d392-cbe7-4049-aa81-46d3892bc4a9-installation-pull-secrets\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781385 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8hlp\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-kube-api-access-m8hlp\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781440 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0770d392-cbe7-4049-aa81-46d3892bc4a9-ca-trust-extracted\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781502 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781731 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-certificates\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.781875 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-trusted-ca\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:15 crc kubenswrapper[4925]: E0121 10:58:15.782741 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:16.282715327 +0000 UTC m=+187.886607261 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.786068 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" Jan 21 10:58:15 crc kubenswrapper[4925]: I0121 10:58:15.884302 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.167241 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:16.667104232 +0000 UTC m=+188.270996166 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.167462 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.169269 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.169781 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-metrics-tls\") pod \"dns-default-vz4nw\" (UID: \"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.171043 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.171806 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.175889 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0bccbd0b-3782-4ad2-bdd5-f21e5109165d-cert\") pod \"ingress-canary-85pbp\" (UID: \"0bccbd0b-3782-4ad2-bdd5-f21e5109165d\") " pod="openshift-ingress-canary/ingress-canary-85pbp" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.179348 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bstp5\" (UniqueName: \"kubernetes.io/projected/0bccbd0b-3782-4ad2-bdd5-f21e5109165d-kube-api-access-bstp5\") pod \"ingress-canary-85pbp\" (UID: \"0bccbd0b-3782-4ad2-bdd5-f21e5109165d\") " pod="openshift-ingress-canary/ingress-canary-85pbp" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.190239 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-config-volume\") pod \"dns-default-vz4nw\" (UID: \"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.197765 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99bdf\" (UniqueName: \"kubernetes.io/projected/15dcf9e9-44e8-4662-9f3d-6cef771808c5-kube-api-access-99bdf\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.198480 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-tls\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.203518 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0770d392-cbe7-4049-aa81-46d3892bc4a9-installation-pull-secrets\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.204998 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8hlp\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-kube-api-access-m8hlp\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.205984 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0770d392-cbe7-4049-aa81-46d3892bc4a9-ca-trust-extracted\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.207323 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-plugins-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.209705 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bmpxp"] Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.210130 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-socket-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.210368 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.210557 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/10a74ffd-f45d-44ec-8287-d6a839fbb0af-node-bootstrap-token\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.213629 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-certificates\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.214335 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-csi-data-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: 
\"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.214901 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-trusted-ca\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.220729 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:16.720701424 +0000 UTC m=+188.324593568 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.223155 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee785c06-3ec0-4917-a762-a5a8c178b95a-config-volume\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.224139 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0770d392-cbe7-4049-aa81-46d3892bc4a9-ca-trust-extracted\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.225621 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-certificates\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.235026 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-trusted-ca\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.237679 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4d61279b-b5c2-440f-9bac-689a27484f8c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fvwpn\" (UID: \"4d61279b-b5c2-440f-9bac-689a27484f8c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.242084 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l54s\" (UniqueName: \"kubernetes.io/projected/10a74ffd-f45d-44ec-8287-d6a839fbb0af-kube-api-access-9l54s\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.242238 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee785c06-3ec0-4917-a762-a5a8c178b95a-secret-volume\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.242387 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwn4h\" (UniqueName: \"kubernetes.io/projected/5c2fa6a9-ee76-4308-a8f1-095d9720c688-kube-api-access-mwn4h\") pod \"control-plane-machine-set-operator-78cbb6b69f-2fd99\" (UID: \"5c2fa6a9-ee76-4308-a8f1-095d9720c688\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.242664 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5c2fa6a9-ee76-4308-a8f1-095d9720c688-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-2fd99\" (UID: \"5c2fa6a9-ee76-4308-a8f1-095d9720c688\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.242823 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r78r7\" (UniqueName: \"kubernetes.io/projected/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-kube-api-access-r78r7\") pod \"dns-default-vz4nw\" (UID: \"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.242959 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl55r\" (UniqueName: \"kubernetes.io/projected/ee785c06-3ec0-4917-a762-a5a8c178b95a-kube-api-access-kl55r\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.243278 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-bound-sa-token\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.243508 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjm6j\" (UniqueName: \"kubernetes.io/projected/4d61279b-b5c2-440f-9bac-689a27484f8c-kube-api-access-mjm6j\") pod \"package-server-manager-789f6589d5-fvwpn\" (UID: \"4d61279b-b5c2-440f-9bac-689a27484f8c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 
10:58:16.243703 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-registration-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.243882 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/10a74ffd-f45d-44ec-8287-d6a839fbb0af-certs\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.244125 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-mountpoint-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.245931 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0770d392-cbe7-4049-aa81-46d3892bc4a9-installation-pull-secrets\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.246315 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-tls\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.247678 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" event={"ID":"81742698-2b71-46b2-93fa-1552cfa27f8a","Type":"ContainerStarted","Data":"f563f1bdf42ecaf5600e85312478f968ffcb88954939b4343586a199a8e961e8"} Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.249502 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8hlp\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-kube-api-access-m8hlp\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.257494 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5c2fa6a9-ee76-4308-a8f1-095d9720c688-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-2fd99\" (UID: \"5c2fa6a9-ee76-4308-a8f1-095d9720c688\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.270092 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-bound-sa-token\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: 
\"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.270338 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4d577"] Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.273773 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwn4h\" (UniqueName: \"kubernetes.io/projected/5c2fa6a9-ee76-4308-a8f1-095d9720c688-kube-api-access-mwn4h\") pod \"control-plane-machine-set-operator-78cbb6b69f-2fd99\" (UID: \"5c2fa6a9-ee76-4308-a8f1-095d9720c688\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.315142 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.351938 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.352923 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:16.852873051 +0000 UTC m=+188.456764995 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.355454 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-config-volume\") pod \"dns-default-vz4nw\" (UID: \"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.355519 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99bdf\" (UniqueName: \"kubernetes.io/projected/15dcf9e9-44e8-4662-9f3d-6cef771808c5-kube-api-access-99bdf\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.355719 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-plugins-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.355781 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: 
\"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-socket-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.355820 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.355854 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/10a74ffd-f45d-44ec-8287-d6a839fbb0af-node-bootstrap-token\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.355903 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-csi-data-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.355966 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee785c06-3ec0-4917-a762-a5a8c178b95a-config-volume\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356020 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4d61279b-b5c2-440f-9bac-689a27484f8c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fvwpn\" (UID: \"4d61279b-b5c2-440f-9bac-689a27484f8c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356053 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l54s\" (UniqueName: \"kubernetes.io/projected/10a74ffd-f45d-44ec-8287-d6a839fbb0af-kube-api-access-9l54s\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356081 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee785c06-3ec0-4917-a762-a5a8c178b95a-secret-volume\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356127 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r78r7\" (UniqueName: \"kubernetes.io/projected/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-kube-api-access-r78r7\") pod \"dns-default-vz4nw\" (UID: 
\"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356155 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kl55r\" (UniqueName: \"kubernetes.io/projected/ee785c06-3ec0-4917-a762-a5a8c178b95a-kube-api-access-kl55r\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356198 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjm6j\" (UniqueName: \"kubernetes.io/projected/4d61279b-b5c2-440f-9bac-689a27484f8c-kube-api-access-mjm6j\") pod \"package-server-manager-789f6589d5-fvwpn\" (UID: \"4d61279b-b5c2-440f-9bac-689a27484f8c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356229 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-registration-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356263 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/10a74ffd-f45d-44ec-8287-d6a839fbb0af-certs\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356294 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-mountpoint-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356343 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-metrics-tls\") pod \"dns-default-vz4nw\" (UID: \"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356406 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0bccbd0b-3782-4ad2-bdd5-f21e5109165d-cert\") pod \"ingress-canary-85pbp\" (UID: \"0bccbd0b-3782-4ad2-bdd5-f21e5109165d\") " pod="openshift-ingress-canary/ingress-canary-85pbp" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.356452 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bstp5\" (UniqueName: \"kubernetes.io/projected/0bccbd0b-3782-4ad2-bdd5-f21e5109165d-kube-api-access-bstp5\") pod \"ingress-canary-85pbp\" (UID: \"0bccbd0b-3782-4ad2-bdd5-f21e5109165d\") " pod="openshift-ingress-canary/ingress-canary-85pbp" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.357982 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-config-volume\") pod \"dns-default-vz4nw\" 
(UID: \"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.358601 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-plugins-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.358668 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-socket-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.359551 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:16.859532721 +0000 UTC m=+188.463424665 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.364707 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-csi-data-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.365201 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-registration-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.484696 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.488042 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:16.987990752 +0000 UTC m=+188.591882696 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.494698 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/15dcf9e9-44e8-4662-9f3d-6cef771808c5-mountpoint-dir\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.526422 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee785c06-3ec0-4917-a762-a5a8c178b95a-config-volume\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.535287 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/10a74ffd-f45d-44ec-8287-d6a839fbb0af-certs\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.545190 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee785c06-3ec0-4917-a762-a5a8c178b95a-secret-volume\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.558270 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0bccbd0b-3782-4ad2-bdd5-f21e5109165d-cert\") pod \"ingress-canary-85pbp\" (UID: \"0bccbd0b-3782-4ad2-bdd5-f21e5109165d\") " pod="openshift-ingress-canary/ingress-canary-85pbp" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.561815 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/10a74ffd-f45d-44ec-8287-d6a839fbb0af-node-bootstrap-token\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.563541 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4d61279b-b5c2-440f-9bac-689a27484f8c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fvwpn\" (UID: \"4d61279b-b5c2-440f-9bac-689a27484f8c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.566213 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bstp5\" (UniqueName: \"kubernetes.io/projected/0bccbd0b-3782-4ad2-bdd5-f21e5109165d-kube-api-access-bstp5\") pod 
\"ingress-canary-85pbp\" (UID: \"0bccbd0b-3782-4ad2-bdd5-f21e5109165d\") " pod="openshift-ingress-canary/ingress-canary-85pbp" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.602119 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.603052 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:17.103028228 +0000 UTC m=+188.706920162 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.607318 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99bdf\" (UniqueName: \"kubernetes.io/projected/15dcf9e9-44e8-4662-9f3d-6cef771808c5-kube-api-access-99bdf\") pod \"csi-hostpathplugin-qrrl6\" (UID: \"15dcf9e9-44e8-4662-9f3d-6cef771808c5\") " pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.608508 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kl55r\" (UniqueName: \"kubernetes.io/projected/ee785c06-3ec0-4917-a762-a5a8c178b95a-kube-api-access-kl55r\") pod \"collect-profiles-29483205-hjb8l\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.612170 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjm6j\" (UniqueName: \"kubernetes.io/projected/4d61279b-b5c2-440f-9bac-689a27484f8c-kube-api-access-mjm6j\") pod \"package-server-manager-789f6589d5-fvwpn\" (UID: \"4d61279b-b5c2-440f-9bac-689a27484f8c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.615209 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r78r7\" (UniqueName: \"kubernetes.io/projected/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-kube-api-access-r78r7\") pod \"dns-default-vz4nw\" (UID: \"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.617060 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l54s\" (UniqueName: \"kubernetes.io/projected/10a74ffd-f45d-44ec-8287-d6a839fbb0af-kube-api-access-9l54s\") pod \"machine-config-server-2hltc\" (UID: \"10a74ffd-f45d-44ec-8287-d6a839fbb0af\") " pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.617272 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6-metrics-tls\") pod \"dns-default-vz4nw\" (UID: \"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6\") " pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: W0121 10:58:16.694210 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78713d9a_139c_4d4e_8068_a0d0d98b86df.slice/crio-941d9190c442b8d4d271aad59a8f23ada403d205bda429e5fa61e8455965b629 WatchSource:0}: Error finding container 941d9190c442b8d4d271aad59a8f23ada403d205bda429e5fa61e8455965b629: Status 404 returned error can't find the container with id 941d9190c442b8d4d271aad59a8f23ada403d205bda429e5fa61e8455965b629 Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.703053 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.703462 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.704144 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:17.2040784 +0000 UTC m=+188.807970344 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.745001 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-85pbp" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.753018 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.811570 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.817756 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.822139 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:17.322120126 +0000 UTC m=+188.926012060 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.823137 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.881881 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-2hltc" Jan 21 10:58:16 crc kubenswrapper[4925]: I0121 10:58:16.947301 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:16 crc kubenswrapper[4925]: E0121 10:58:16.947912 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:17.447893228 +0000 UTC m=+189.051785162 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.066530 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.067083 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:17.567064002 +0000 UTC m=+189.170955936 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.354777 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.356275 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:17.856220083 +0000 UTC m=+189.460112027 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.370079 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" event={"ID":"7cbece9b-282a-4634-b41d-85f872a5be93","Type":"ContainerStarted","Data":"a6e4a84d9ae6dc5a9b1f7944be6fb1d28a91b20c683da17455043b352ebb7fd1"} Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.379126 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4d577" event={"ID":"78713d9a-139c-4d4e-8068-a0d0d98b86df","Type":"ContainerStarted","Data":"941d9190c442b8d4d271aad59a8f23ada403d205bda429e5fa61e8455965b629"} Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.388071 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-n2k47" event={"ID":"fb3fdc07-c6f5-4330-8b00-e454c98ef11d","Type":"ContainerStarted","Data":"39f5b26db4c71b7e59aa15f3a845243a3575ee4bf42be85f205998065506adea"} Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.392874 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" event={"ID":"5a264bb6-3e63-4411-b0a4-95be21527653","Type":"ContainerStarted","Data":"3f1686d72e95dff0692d8390ded689c8c0d9d6740b79ff3bccec75d8af0258f0"} Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.512924 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.513536 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.013520741 +0000 UTC m=+189.617412675 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.615003 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.615260 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.115232676 +0000 UTC m=+189.719124610 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.615678 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.616203 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.116194057 +0000 UTC m=+189.720085991 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.742915 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.743563 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.243539802 +0000 UTC m=+189.847431736 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.851417 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.852023 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.35200528 +0000 UTC m=+189.955897214 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.956471 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.956893 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.456847108 +0000 UTC m=+190.060739052 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:17 crc kubenswrapper[4925]: I0121 10:58:17.957219 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:17 crc kubenswrapper[4925]: E0121 10:58:17.957920 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.457896622 +0000 UTC m=+190.061788556 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.059386 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.060027 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.5600032 +0000 UTC m=+190.163895134 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.248341 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.248972 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.748946787 +0000 UTC m=+190.352838721 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.350902 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.351150 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.851116647 +0000 UTC m=+190.455008581 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.351552 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.352042 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.852032147 +0000 UTC m=+190.455924081 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.404777 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-2hltc" event={"ID":"10a74ffd-f45d-44ec-8287-d6a839fbb0af","Type":"ContainerStarted","Data":"2c792a5e4e963e8b4d39b5553bed9b9d72e047313f9157b77cd8821d25427419"} Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.474990 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.475863 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:18.975837684 +0000 UTC m=+190.579729618 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.478564 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-2txwq" event={"ID":"5c3596d1-1f08-4703-ab63-c29358aac0d9","Type":"ContainerStarted","Data":"b08cfb67f2a14cd02968107741fb7a80926ee8b3466d99b7c49610de9de86f28"} Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.578540 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:19.07851224 +0000 UTC m=+190.682404174 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.578857 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.680833 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.681462 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:19.181439614 +0000 UTC m=+190.785331548 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.782913 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.783534 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:19.2835159 +0000 UTC m=+190.887407834 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:18 crc kubenswrapper[4925]: I0121 10:58:18.930112 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:18 crc kubenswrapper[4925]: E0121 10:58:18.930965 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:19.430941821 +0000 UTC m=+191.034833765 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.077959 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:19 crc kubenswrapper[4925]: E0121 10:58:19.078741 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:19.578721533 +0000 UTC m=+191.182613467 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.108208 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.180333 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:19 crc kubenswrapper[4925]: E0121 10:58:19.181125 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:19.681103799 +0000 UTC m=+191.284995733 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.355089 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:19 crc kubenswrapper[4925]: E0121 10:58:19.355727 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:19.855702161 +0000 UTC m=+191.459594285 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.557236 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:19 crc kubenswrapper[4925]: E0121 10:58:19.557888 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.057862067 +0000 UTC m=+191.661754001 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.558169 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:19 crc kubenswrapper[4925]: E0121 10:58:19.558716 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.058704815 +0000 UTC m=+191.662596749 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.648852 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4d577" event={"ID":"78713d9a-139c-4d4e-8068-a0d0d98b86df","Type":"ContainerStarted","Data":"307c5bd1512e9d41b0e6856aefd03b4db829dbdc69720b18f556a7131ad3f67f"} Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.654680 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.661318 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:19 crc kubenswrapper[4925]: E0121 10:58:19.663174 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.16314916 +0000 UTC m=+191.767041094 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.731597 4925 patch_prober.go:28] interesting pod/console-operator-58897d9998-4d577 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/readyz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.747692 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4d577" podUID="78713d9a-139c-4d4e-8068-a0d0d98b86df" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/readyz\": dial tcp 10.217.0.18:8443: connect: connection refused" Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.782617 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-2hltc" event={"ID":"10a74ffd-f45d-44ec-8287-d6a839fbb0af","Type":"ContainerStarted","Data":"f298e132a677e2d725577f9f928ffa3943521097afb186518babce2e6c3d331c"} Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.782797 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:19 crc kubenswrapper[4925]: E0121 10:58:19.783415 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.283363767 +0000 UTC m=+191.887255701 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.807505 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" event={"ID":"5a264bb6-3e63-4411-b0a4-95be21527653","Type":"ContainerStarted","Data":"ea114748e3f6f3eaaf983f40faefc10126bf62558ad8d851475e6c018b695dcc"} Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.841794 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-n2k47" event={"ID":"fb3fdc07-c6f5-4330-8b00-e454c98ef11d","Type":"ContainerStarted","Data":"570dde847e61534f1693811f27e12ff6ec44b499d95c55bd6b3cd203fadf01b7"} Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.877100 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.877159 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" event={"ID":"81742698-2b71-46b2-93fa-1552cfa27f8a","Type":"ContainerStarted","Data":"71974198774b506a998c7fce21df7aa5430b12fdf9e68953df90710e34dcfa57"} Jan 21 10:58:19 crc kubenswrapper[4925]: I0121 10:58:19.891725 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:19 crc kubenswrapper[4925]: E0121 10:58:19.893047 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.393024605 +0000 UTC m=+191.996916539 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:19.979834 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:19.979951 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.053250 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" event={"ID":"7cbece9b-282a-4634-b41d-85f872a5be93","Type":"ContainerStarted","Data":"01e8828f16474d89895b8f3ded7062845c6e11259ce71280533b06bb41cc4916"} Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.054820 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.054976 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 21 10:58:20 crc kubenswrapper[4925]: E0121 10:58:20.056020 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.556003011 +0000 UTC m=+192.159894945 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.053998 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.165688 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-2hltc" podStartSLOduration=8.165660149 podStartE2EDuration="8.165660149s" podCreationTimestamp="2026-01-21 10:58:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:19.982380789 +0000 UTC m=+191.586272723" watchObservedRunningTime="2026-01-21 10:58:20.165660149 +0000 UTC m=+191.769552083" Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.246418 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-4d577" podStartSLOduration=154.246374576 podStartE2EDuration="2m34.246374576s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:20.168048878 +0000 UTC m=+191.771940812" watchObservedRunningTime="2026-01-21 10:58:20.246374576 +0000 UTC m=+191.850266510" Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.246868 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-n2k47" podStartSLOduration=153.246862483 podStartE2EDuration="2m33.246862483s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:20.246046715 +0000 UTC m=+191.849938659" watchObservedRunningTime="2026-01-21 10:58:20.246862483 +0000 UTC m=+191.850754417" Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.249495 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:20 crc kubenswrapper[4925]: E0121 10:58:20.251273 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.751244588 +0000 UTC m=+192.355136522 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.353857 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:20 crc kubenswrapper[4925]: E0121 10:58:20.354608 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.854588756 +0000 UTC m=+192.458480690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.371016 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-2txwq" podStartSLOduration=153.370966129 podStartE2EDuration="2m33.370966129s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:20.369933165 +0000 UTC m=+191.973825109" watchObservedRunningTime="2026-01-21 10:58:20.370966129 +0000 UTC m=+191.974858063" Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.455753 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:20 crc kubenswrapper[4925]: E0121 10:58:20.456186 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:20.956165436 +0000 UTC m=+192.560057370 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.456226 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-kk7wd"] Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.457292 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-4kjnr" podStartSLOduration=154.457256242 podStartE2EDuration="2m34.457256242s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:20.44394415 +0000 UTC m=+192.047836084" watchObservedRunningTime="2026-01-21 10:58:20.457256242 +0000 UTC m=+192.061148176" Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.493190 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vwhv9"] Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.641897 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:20 crc kubenswrapper[4925]: E0121 10:58:20.642599 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:21.142579134 +0000 UTC m=+192.746471068 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.697060 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt"] Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.701182 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx"] Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.743839 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:20 crc kubenswrapper[4925]: E0121 10:58:20.744174 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:21.24415558 +0000 UTC m=+192.848047514 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:20 crc kubenswrapper[4925]: W0121 10:58:20.788406 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ce9d1cf_7d42_4a6a_91ce_3a1d0afd4605.slice/crio-d0bfd11441b10940356c2d632945b6d289bd44d08b64f92f6b3378e0074afddb WatchSource:0}: Error finding container d0bfd11441b10940356c2d632945b6d289bd44d08b64f92f6b3378e0074afddb: Status 404 returned error can't find the container with id d0bfd11441b10940356c2d632945b6d289bd44d08b64f92f6b3378e0074afddb Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.846099 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:20 crc kubenswrapper[4925]: E0121 10:58:20.846893 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:21.346873736 +0000 UTC m=+192.950765670 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.888006 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:20 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:20 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:20 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.888171 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:20 crc kubenswrapper[4925]: I0121 10:58:20.947371 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:20 crc kubenswrapper[4925]: E0121 10:58:20.947838 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:21.447813584 +0000 UTC m=+193.051705518 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.049902 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.050810 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:21.550792611 +0000 UTC m=+193.154684545 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.077545 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" event={"ID":"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605","Type":"ContainerStarted","Data":"d0bfd11441b10940356c2d632945b6d289bd44d08b64f92f6b3378e0074afddb"} Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.082725 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.088356 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" event={"ID":"be500af9-e814-41db-be2a-e4f3fa9d46bb","Type":"ContainerStarted","Data":"4e7907e1961e3d666394f52ffbb5e93cc27251217ae86dd4d6d78994a53742c7"} Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.113850 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" event={"ID":"91b59e6a-27bf-49a6-99c0-cb20160980ac","Type":"ContainerStarted","Data":"e8d1c043a67ac9eccc740c2aa15b7e4b3f6285667e41c94c91b265566956e66c"} Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.117330 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" event={"ID":"81742698-2b71-46b2-93fa-1552cfa27f8a","Type":"ContainerStarted","Data":"e5c788baae46c551ec8a2fbbf7cd238f5860fc751b9255ba65cc8c06a5670651"} Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.124892 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" event={"ID":"5a264bb6-3e63-4411-b0a4-95be21527653","Type":"ContainerStarted","Data":"2b2a1d0039b60dea73da2fdff320236f10e2d8da4288155b2c5b5fde3766cd28"} Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.132250 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" event={"ID":"b4eed50b-ef22-4637-9aa1-d8528310aed1","Type":"ContainerStarted","Data":"787b4c037b08656b406fd5e3fb873a6d54f00ffb51fbb5fdcdc8f707a66b147d"} Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.144009 4925 patch_prober.go:28] interesting pod/console-operator-58897d9998-4d577 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/readyz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.146148 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4d577" podUID="78713d9a-139c-4d4e-8068-a0d0d98b86df" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/readyz\": dial tcp 10.217.0.18:8443: connect: connection refused" Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.179162 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.179736 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:21.679708098 +0000 UTC m=+193.283600032 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.186858 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g66gm" podStartSLOduration=155.186818633 podStartE2EDuration="2m35.186818633s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:21.180238144 +0000 UTC m=+192.784130078" watchObservedRunningTime="2026-01-21 10:58:21.186818633 +0000 UTC m=+192.790710567" Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.191858 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-7lrsj"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.207805 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-b7tzs"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.212015 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-bmpxp" podStartSLOduration=154.211985568 podStartE2EDuration="2m34.211985568s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:21.211040336 +0000 UTC m=+192.814932270" watchObservedRunningTime="2026-01-21 10:58:21.211985568 +0000 UTC m=+192.815877502" Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.256284 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.277474 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-vw8cb"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.283521 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.286639 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.292289 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.293477 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx"] Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.298920 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:21.798896421 +0000 UTC m=+193.402788355 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.308198 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.326599 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dz6wr"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.326620 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-pxkk7"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.380948 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.398702 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.416717 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.417252 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:21.917231667 +0000 UTC m=+193.521123601 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.441407 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8ht27"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.447080 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.452007 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.479867 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.539883 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.552498 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.052462612 +0000 UTC m=+193.656354536 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.592924 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-85pbp"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.593029 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.593047 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bhfrw"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.593471 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.593495 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.629333 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.631010 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vz4nw"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.632206 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.648590 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.672527 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.672951 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.172931948 +0000 UTC m=+193.776823882 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.701149 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.719744 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.775518 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.776086 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.2760697 +0000 UTC m=+193.879961634 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: W0121 10:58:21.795506 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb22e36ec_37f7_4d2c_87fb_ce56d5436a8d.slice/crio-7122b0bc300a4117a30c8ee39a503b0f07e8d91235168cec2386516053c94554 WatchSource:0}: Error finding container 7122b0bc300a4117a30c8ee39a503b0f07e8d91235168cec2386516053c94554: Status 404 returned error can't find the container with id 7122b0bc300a4117a30c8ee39a503b0f07e8d91235168cec2386516053c94554 Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.843170 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k9srb"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.876539 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.877263 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-21 10:58:22.377233715 +0000 UTC m=+193.981125649 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.884824 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:21 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:21 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:21 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.884919 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.909910 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qrrl6"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.923041 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4"] Jan 21 10:58:21 crc kubenswrapper[4925]: I0121 10:58:21.978314 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:21 crc kubenswrapper[4925]: E0121 10:58:21.978744 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.478729692 +0000 UTC m=+194.082621626 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: W0121 10:58:22.062730 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc98e3838_5bee_44ee_8fca_b5b429cef61e.slice/crio-b5f05166f0345dd2319083e38aabd7af540425e13fe8736b1a6270fd660e7abb WatchSource:0}: Error finding container b5f05166f0345dd2319083e38aabd7af540425e13fe8736b1a6270fd660e7abb: Status 404 returned error can't find the container with id b5f05166f0345dd2319083e38aabd7af540425e13fe8736b1a6270fd660e7abb Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.080570 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:22 crc kubenswrapper[4925]: E0121 10:58:22.081097 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.581079888 +0000 UTC m=+194.184971822 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.160100 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" event={"ID":"8f1540bb-bd69-4f44-ac02-8da0575056e1","Type":"ContainerStarted","Data":"ef5d2eee6c13c1ab928d64110e81b48ac01799bec489b1d52f703dbc65d2e399"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.163181 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" event={"ID":"c98e3838-5bee-44ee-8fca-b5b429cef61e","Type":"ContainerStarted","Data":"b5f05166f0345dd2319083e38aabd7af540425e13fe8736b1a6270fd660e7abb"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.188142 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:22 crc kubenswrapper[4925]: E0121 10:58:22.188963 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.688941966 +0000 UTC m=+194.292833900 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.226846 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" event={"ID":"b9b96df7-05d7-4cd0-9e30-c5e485f31804","Type":"ContainerStarted","Data":"891df69a2dfa7cdb736cfaf87c980e666b4fe576b801b537f28737a5f1270a72"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.241187 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" event={"ID":"e0376dda-f02a-464e-ae41-18d6fddd7097","Type":"ContainerStarted","Data":"fffea0a266d72dc393c0f3f22fbd56cd2c8d5275dbb743ad00af4a336325835a"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.243662 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" event={"ID":"c93dc177-affe-4232-9b28-fd8006418818","Type":"ContainerStarted","Data":"ed37944daebe8d8605d13de76fcab4f5347631a0808c93557e67954b18577031"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.244997 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" event={"ID":"ee785c06-3ec0-4917-a762-a5a8c178b95a","Type":"ContainerStarted","Data":"16b8eb5f7a3626fc7516b9e60dd71e21f35b57dbb8897df704e481ea5643a097"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.248597 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" event={"ID":"6405bedd-bfe2-411b-937d-8f309fc6d0e8","Type":"ContainerStarted","Data":"299730b35fb0bc6b7b885a5d8e4db3e4cfc3a2b9f1386e7db3f635219a44436c"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.253087 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" event={"ID":"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8","Type":"ContainerStarted","Data":"fa2adfa92bae6f7049137495f5a28c9f64e4b348629eebd5220220b2dcc9a565"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.258299 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vw8cb" event={"ID":"890e3b6e-bd8d-438c-992b-508bb751bdca","Type":"ContainerStarted","Data":"143d18131be0b4ce5ec547676623c771976a385cfae4b8ddbd78f4257882bef8"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.261921 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" event={"ID":"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d","Type":"ContainerStarted","Data":"7122b0bc300a4117a30c8ee39a503b0f07e8d91235168cec2386516053c94554"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.287891 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" 
event={"ID":"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9","Type":"ContainerStarted","Data":"6c240036bcbf63da0ee6c1c543c2c01396457f727d9e8215bdef1cc338163149"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.289544 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:22 crc kubenswrapper[4925]: E0121 10:58:22.289970 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.789949526 +0000 UTC m=+194.393841460 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.290844 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" event={"ID":"e265c5ca-d8d4-4ba0-81db-fd48d3974762","Type":"ContainerStarted","Data":"0374d13c3760c540d1d3ab679081cf77f91350f35e89f4c22f790e47931a41d5"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.300658 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" event={"ID":"91b59e6a-27bf-49a6-99c0-cb20160980ac","Type":"ContainerStarted","Data":"c900b524a36e8b09417b9c8864982bf6f8f39ad5fe663135e337e006247ae42e"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.303074 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-85pbp" event={"ID":"0bccbd0b-3782-4ad2-bdd5-f21e5109165d","Type":"ContainerStarted","Data":"5cf6a2985acaab99e08c1f7a3b91f6f9ed7b60bfe53792fc9b272befec92b11d"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.306481 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" event={"ID":"5cb8f784-0f6a-43c7-a37a-d7f65668af7a","Type":"ContainerStarted","Data":"64c3965e2b9b626d860d4bd64ab5429fafa4a4cb103621e8bfb578e6c066f778"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.323093 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" event={"ID":"68968bee-6187-43fa-bad4-ab1eb83e9c68","Type":"ContainerStarted","Data":"3143d70223ea42ba179d1f984da70b5f5bec19ad66eb2c76882bbbf89c41a2f3"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.331940 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" event={"ID":"15dcf9e9-44e8-4662-9f3d-6cef771808c5","Type":"ContainerStarted","Data":"ae0b863a82064d0fcffb6f05407fc851fa4c22f6dddaad6d1d5d9545375c704b"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.344357 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" event={"ID":"dfbafd02-2fcf-4a25-a454-ade91c336036","Type":"ContainerStarted","Data":"9120ee11dc63637a44dddf43d8c33da4120ee920bb7a75beae30b129782bbef0"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.359187 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" event={"ID":"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605","Type":"ContainerStarted","Data":"ab82c8430ddacd9b4cd8526748cfaf096289eda3d3af7d7c2fdd7b1d05607489"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.382888 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" event={"ID":"4d61279b-b5c2-440f-9bac-689a27484f8c","Type":"ContainerStarted","Data":"84b80891ea29464de755e84f55817a5383f3706e5e3b5b12b6736436b6516ea7"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.387230 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" event={"ID":"5dccce36-49ce-4eea-ac64-60faf9ba2e04","Type":"ContainerStarted","Data":"a8969a24a36a0544521a77627c5574884018970985687d0b3108fe6d0d60f748"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.391344 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:22 crc kubenswrapper[4925]: E0121 10:58:22.391823 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.891807075 +0000 UTC m=+194.495699009 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.410604 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" event={"ID":"434007d9-38af-49cd-a16f-09c87531b8c1","Type":"ContainerStarted","Data":"fe4ad7046d1a72a5c5bb322f5c972cd8de2a9082e5afdbdb1de368f7c69bf4da"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.423868 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" event={"ID":"be085cb8-85ab-409e-a8cb-3d02cd7153f6","Type":"ContainerStarted","Data":"67930d3b1ceb938082301fec2303d33463fc8c0321c315dd88c3885188c23e1e"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.450901 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49" event={"ID":"dffa6415-1a36-41b0-9919-a04bea0bdff8","Type":"ContainerStarted","Data":"b9e26bcba3ac3e3f6d21a121da95ff8a317c52d122aed8e0b410cc4755a8fa76"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.462954 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" event={"ID":"cfd1ca7f-583a-40a7-a485-fb01c60d77c6","Type":"ContainerStarted","Data":"d61fe4a8e3b4e96fd2093f7aeeaec4e5f29809ae3af9f5d0f501e0e07d2ce1b7"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.463926 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" event={"ID":"3dddbbd4-eb3f-436d-8c53-cf413cecca31","Type":"ContainerStarted","Data":"db9095b83bee1784c053bcb48c8636cfd99235398b98ac650ed862b48e922adc"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.473504 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" event={"ID":"5c2fa6a9-ee76-4308-a8f1-095d9720c688","Type":"ContainerStarted","Data":"cacb08d3cc62e62df727a9e809befd4cc00cab0d56f057c34d7229df2832a381"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.474920 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vz4nw" event={"ID":"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6","Type":"ContainerStarted","Data":"e2c1efb51de33c3b975019296113f29ae09be1af4a5bbf23bc08dbb0953f82e6"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.484037 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" event={"ID":"5ad56032-3192-4b42-b7ca-a8c3bac978d1","Type":"ContainerStarted","Data":"ba6666a3f3faff8d92141542b0fae572852497ba69870b07daeb1ee540004e12"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.486113 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" event={"ID":"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e","Type":"ContainerStarted","Data":"0712eb6592216660049ce3211d70ddb01e6c7033dcf1535445131a98c2c3a830"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.487194 4925 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" event={"ID":"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1","Type":"ContainerStarted","Data":"3f01e84eb086d306a8554d9cfc7d77b639703baeed94236b07b5103304800acf"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.488702 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-7lrsj" event={"ID":"59445cd0-2391-49e1-9a4e-6ca280c8ab85","Type":"ContainerStarted","Data":"e8702c105249c0af8a0934331b188edb3059ed7ecc3f38c7760e1d63c4f49bb4"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.489996 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" event={"ID":"05b134e2-b96d-4a00-9681-6c8ce017bc74","Type":"ContainerStarted","Data":"89826967a95f9ab5f498d9d61f2b189a0dacf50009439698b4ebbfc574fa3590"} Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.492174 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:22 crc kubenswrapper[4925]: E0121 10:58:22.492587 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.992552377 +0000 UTC m=+194.596444311 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.493262 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:22 crc kubenswrapper[4925]: E0121 10:58:22.494877 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:22.994856354 +0000 UTC m=+194.598748508 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.501333 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-4d577" Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.594904 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:22 crc kubenswrapper[4925]: E0121 10:58:22.595065 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:23.095034986 +0000 UTC m=+194.698926920 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.595850 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:22 crc kubenswrapper[4925]: E0121 10:58:22.597700 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:23.097683834 +0000 UTC m=+194.701575848 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:22 crc kubenswrapper[4925]: I0121 10:58:22.998641 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:22 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:22 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:22 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:23 crc kubenswrapper[4925]: I0121 10:58:22.998778 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:23 crc kubenswrapper[4925]: I0121 10:58:22.999421 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:23 crc kubenswrapper[4925]: E0121 10:58:23.005850 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:23.505821103 +0000 UTC m=+195.109713037 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:23 crc kubenswrapper[4925]: I0121 10:58:23.278274 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:23 crc kubenswrapper[4925]: E0121 10:58:23.278877 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:23.778857931 +0000 UTC m=+195.382749865 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:23 crc kubenswrapper[4925]: I0121 10:58:23.839587 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:23 crc kubenswrapper[4925]: E0121 10:58:23.847409 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:24.847329419 +0000 UTC m=+196.451221353 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.063669 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.063864 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:24.56384551 +0000 UTC m=+196.167737444 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.064747 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.221444 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:24 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:24 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:24 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.221873 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.239300 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:24.73926688 +0000 UTC m=+196.343158814 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.241070 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.243656 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:24.743604013 +0000 UTC m=+196.347496098 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.243916 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.246655 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:24.746643094 +0000 UTC m=+196.350535028 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.350032 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.350620 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:24.850600293 +0000 UTC m=+196.454492227 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.370236 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" event={"ID":"cfd1ca7f-583a-40a7-a485-fb01c60d77c6","Type":"ContainerStarted","Data":"c2505cc6bdc6d8b9f950ef0171cbcbe6e2aadb34ff9202286a871e49cf436caa"} Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.565237 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.567629 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" event={"ID":"b9b96df7-05d7-4cd0-9e30-c5e485f31804","Type":"ContainerStarted","Data":"0ca668226a5afdf44232a676478a95ff6f453db6511b1be21d5d2314fbc5bc79"} Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.573819 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:25.073793069 +0000 UTC m=+196.677685003 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.665611 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-b7tzs" podStartSLOduration=157.66556847 podStartE2EDuration="2m37.66556847s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:24.662133467 +0000 UTC m=+196.266025401" watchObservedRunningTime="2026-01-21 10:58:24.66556847 +0000 UTC m=+196.269460424" Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.677442 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" event={"ID":"6405bedd-bfe2-411b-937d-8f309fc6d0e8","Type":"ContainerStarted","Data":"ce96316c65168cc85d1a4b43cf24ae8b8ed488552871741ddeedd47dece0a0db"} Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.678224 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.679375 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:25.179348496 +0000 UTC m=+196.783240440 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.815587 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.820615 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49" event={"ID":"dffa6415-1a36-41b0-9919-a04bea0bdff8","Type":"ContainerStarted","Data":"8165c317e33d99852664d92489655fadf66935df5cf31a62ee81040d1d87d397"} Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.821412 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:25.321365954 +0000 UTC m=+196.925257888 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.839105 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" event={"ID":"05b134e2-b96d-4a00-9681-6c8ce017bc74","Type":"ContainerStarted","Data":"486d7b7bb73708ae6fedf475279e0690b1eca257d7696cb8bdc2fadd720c9dd9"} Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.885626 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.892052 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:24 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:24 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:24 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.892105 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.907510 4925 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" event={"ID":"91b59e6a-27bf-49a6-99c0-cb20160980ac","Type":"ContainerStarted","Data":"060dc527ced268fbb39679f1042a977afaa76e2c09d72d57c0ddc2f775eb82fc"} Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.923602 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:24 crc kubenswrapper[4925]: E0121 10:58:24.926683 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:25.426657622 +0000 UTC m=+197.030549556 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.940520 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" event={"ID":"be500af9-e814-41db-be2a-e4f3fa9d46bb","Type":"ContainerStarted","Data":"87b7227a3131911cde189b1126b478ada899b608260acbd06455fd1740a32dfd"} Jan 21 10:58:24 crc kubenswrapper[4925]: I0121 10:58:24.968736 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77kwx" podStartSLOduration=158.968705927 podStartE2EDuration="2m38.968705927s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:24.884759634 +0000 UTC m=+196.488651568" watchObservedRunningTime="2026-01-21 10:58:24.968705927 +0000 UTC m=+196.572597871" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.282828 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" event={"ID":"8f1540bb-bd69-4f44-ac02-8da0575056e1","Type":"ContainerStarted","Data":"93d15aad339a63927328769ce895c0e16bd3af9dddd9e8a4aa52b91a69588839"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.283598 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.287915 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-7lrsj" event={"ID":"59445cd0-2391-49e1-9a4e-6ca280c8ab85","Type":"ContainerStarted","Data":"8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.291705 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" 
event={"ID":"5c2fa6a9-ee76-4308-a8f1-095d9720c688","Type":"ContainerStarted","Data":"358019572bf5106ca4abd26357a0f4e4d462f9cafe51acef43d98e8ecbb11134"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.297050 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" event={"ID":"dfbafd02-2fcf-4a25-a454-ade91c336036","Type":"ContainerStarted","Data":"01f227e50b9a3ff634f3440daa531f5f152ed6a31cf1d12a73067d74d307ed85"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.300193 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:25 crc kubenswrapper[4925]: E0121 10:58:25.302176 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:25.802154828 +0000 UTC m=+197.406046852 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.308549 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" event={"ID":"b4eed50b-ef22-4637-9aa1-d8528310aed1","Type":"ContainerStarted","Data":"b0af228001ee8dd1a524a54390aff7f32360db1a3f5c86cf859a82ff5638775b"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.309597 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.325628 4925 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-dz6wr container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.325779 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" podUID="8f1540bb-bd69-4f44-ac02-8da0575056e1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.328045 4925 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vwhv9 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" start-of-body= Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.328502 4925 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.329556 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" event={"ID":"5ad56032-3192-4b42-b7ca-a8c3bac978d1","Type":"ContainerStarted","Data":"0974e7b3ef2f1b912d7797b7ef3ef455d39ee52bb08e5b1c75edbd6b617eb4c3"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.335891 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-q4cqt" podStartSLOduration=157.335862676 podStartE2EDuration="2m37.335862676s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:25.330385225 +0000 UTC m=+196.934277159" watchObservedRunningTime="2026-01-21 10:58:25.335862676 +0000 UTC m=+196.939754600" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.340810 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w2zsd" podStartSLOduration=158.340783649 podStartE2EDuration="2m38.340783649s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:24.978925936 +0000 UTC m=+196.582817880" watchObservedRunningTime="2026-01-21 10:58:25.340783649 +0000 UTC m=+196.944675583" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.341468 4925 generic.go:334] "Generic (PLEG): container finished" podID="5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605" containerID="ab82c8430ddacd9b4cd8526748cfaf096289eda3d3af7d7c2fdd7b1d05607489" exitCode=0 Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.341618 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" event={"ID":"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605","Type":"ContainerDied","Data":"ab82c8430ddacd9b4cd8526748cfaf096289eda3d3af7d7c2fdd7b1d05607489"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.364220 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vw8cb" event={"ID":"890e3b6e-bd8d-438c-992b-508bb751bdca","Type":"ContainerStarted","Data":"bde04c60608718c197f551117feaef2b20fbbd8bb179be6bbac5f80533954ae9"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.364990 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cq2j2" podStartSLOduration=158.364968372 podStartE2EDuration="2m38.364968372s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:25.36429549 +0000 UTC m=+196.968187424" watchObservedRunningTime="2026-01-21 10:58:25.364968372 +0000 UTC m=+196.968860306" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.365530 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.378261 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" event={"ID":"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1","Type":"ContainerStarted","Data":"9edc1e2a18a0c9ea071b1b67200624b28a0ea24fceb803b9aeb60cfc92ac9908"} Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.378679 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.378746 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.379040 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.394370 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" podStartSLOduration=159.394338746 podStartE2EDuration="2m39.394338746s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:25.392862878 +0000 UTC m=+196.996754842" watchObservedRunningTime="2026-01-21 10:58:25.394338746 +0000 UTC m=+196.998230690" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.401056 4925 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-nrk92 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.401151 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" podUID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.401720 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:25 crc kubenswrapper[4925]: E0121 10:58:25.402799 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:25.902776926 +0000 UTC m=+197.506668860 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.424384 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-7lrsj" podStartSLOduration=159.424341121 podStartE2EDuration="2m39.424341121s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:25.417913359 +0000 UTC m=+197.021805303" watchObservedRunningTime="2026-01-21 10:58:25.424341121 +0000 UTC m=+197.028233055" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.471667 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2fd99" podStartSLOduration=158.47163903 podStartE2EDuration="2m38.47163903s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:25.46891808 +0000 UTC m=+197.072810014" watchObservedRunningTime="2026-01-21 10:58:25.47163903 +0000 UTC m=+197.075530964" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.474037 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" podStartSLOduration=158.47402662 podStartE2EDuration="2m38.47402662s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:25.443981783 +0000 UTC m=+197.047873717" watchObservedRunningTime="2026-01-21 10:58:25.47402662 +0000 UTC m=+197.077918554" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.496540 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-vw8cb" podStartSLOduration=158.496511456 podStartE2EDuration="2m38.496511456s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:25.492997669 +0000 UTC m=+197.096889603" watchObservedRunningTime="2026-01-21 10:58:25.496511456 +0000 UTC m=+197.100403390" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.504298 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:25 crc kubenswrapper[4925]: E0121 10:58:25.504738 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-21 10:58:26.004723388 +0000 UTC m=+197.608615322 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.514068 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" podStartSLOduration=157.514043167 podStartE2EDuration="2m37.514043167s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:25.51112737 +0000 UTC m=+197.115019304" watchObservedRunningTime="2026-01-21 10:58:25.514043167 +0000 UTC m=+197.117935091" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.605735 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:25 crc kubenswrapper[4925]: E0121 10:58:25.605885 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:26.105864363 +0000 UTC m=+197.709756297 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.606285 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:25 crc kubenswrapper[4925]: E0121 10:58:25.606878 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:26.106853246 +0000 UTC m=+197.710745180 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.708358 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:25 crc kubenswrapper[4925]: E0121 10:58:25.709250 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:26.209227342 +0000 UTC m=+197.813119276 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.810062 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:25 crc kubenswrapper[4925]: E0121 10:58:25.810632 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:26.310616814 +0000 UTC m=+197.914508748 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.875723 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:25 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:25 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:25 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.875801 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:25 crc kubenswrapper[4925]: I0121 10:58:25.910939 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:25 crc kubenswrapper[4925]: E0121 10:58:25.911421 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:26.411375357 +0000 UTC m=+198.015267291 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.258019 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:26 crc kubenswrapper[4925]: E0121 10:58:26.259639 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:26.759619579 +0000 UTC m=+198.363511513 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.362189 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:26 crc kubenswrapper[4925]: E0121 10:58:26.362529 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:26.862496342 +0000 UTC m=+198.466388276 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.363006 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:26 crc kubenswrapper[4925]: E0121 10:58:26.363712 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:26.863694082 +0000 UTC m=+198.467586016 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.371978 4925 csr.go:261] certificate signing request csr-mcsq4 is approved, waiting to be issued Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.383859 4925 csr.go:257] certificate signing request csr-mcsq4 is issued Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.388699 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" event={"ID":"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9","Type":"ContainerStarted","Data":"4fe4bcd20ba99be2481bc0bc9554cbe2a025294265de1ba75b7174929f44c3e1"} Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.391951 4925 generic.go:334] "Generic (PLEG): container finished" podID="e0376dda-f02a-464e-ae41-18d6fddd7097" containerID="eb812502c9db0df4adba4d88c9fb91a87e122e0aa5a6c4053b4da6721f88ca41" exitCode=0 Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.392037 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" event={"ID":"e0376dda-f02a-464e-ae41-18d6fddd7097","Type":"ContainerDied","Data":"eb812502c9db0df4adba4d88c9fb91a87e122e0aa5a6c4053b4da6721f88ca41"} Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.394668 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" event={"ID":"2ce7cc91-68ce-4bcc-99a0-436380c8a2e8","Type":"ContainerStarted","Data":"b1aa24314936fcae574077c767c201043e97acfab7165c966eb0cde2ac13934f"} Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.412144 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" event={"ID":"be085cb8-85ab-409e-a8cb-3d02cd7153f6","Type":"ContainerStarted","Data":"bc1003c326579c1e95bfec6ecbb9b3cb2869509db4a58908944eb43e71311d87"} Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.412420 4925 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-dz6wr container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.412501 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" podUID="8f1540bb-bd69-4f44-ac02-8da0575056e1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.412802 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.412864 4925 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.413997 4925 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vwhv9 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" start-of-body= Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.414043 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.414493 4925 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-nrk92 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.414608 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" podUID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.550477 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:26 crc kubenswrapper[4925]: E0121 10:58:26.555108 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.055066539 +0000 UTC m=+198.658958473 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.667285 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:26 crc kubenswrapper[4925]: E0121 10:58:26.668806 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.168784942 +0000 UTC m=+198.772676876 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.827300 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:26 crc kubenswrapper[4925]: E0121 10:58:26.828025 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.327979253 +0000 UTC m=+198.931871197 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.874993 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:26 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:26 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:26 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.875136 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:26 crc kubenswrapper[4925]: I0121 10:58:26.929428 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:26 crc kubenswrapper[4925]: E0121 10:58:26.929918 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.429904255 +0000 UTC m=+199.033796189 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.030912 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.031452 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.531431522 +0000 UTC m=+199.135323446 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.132028 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.132561 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.632543826 +0000 UTC m=+199.236435770 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.232822 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.233429 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.733382831 +0000 UTC m=+199.337274765 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.334126 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.334872 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.834854867 +0000 UTC m=+199.438746801 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.404302 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-21 10:53:26 +0000 UTC, rotation deadline is 2026-12-07 12:37:07.546954435 +0000 UTC Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.404379 4925 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7681h38m40.142580292s for next certificate rotation Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.435371 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.435669 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.935582209 +0000 UTC m=+199.539474143 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.436074 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.436793 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:27.936782959 +0000 UTC m=+199.540674893 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.469154 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.472066 4925 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vwhv9 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" start-of-body= Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.472215 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.472933 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.472968 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.537828 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.538233 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:28.038148401 +0000 UTC m=+199.642040335 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.539467 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.540202 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:28.040176349 +0000 UTC m=+199.644068283 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.640447 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.641218 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:28.14119906 +0000 UTC m=+199.745090994 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.654268 4925 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-5l4bq container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.654386 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" podUID="be085cb8-85ab-409e-a8cb-3d02cd7153f6" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.788636 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:27 crc kubenswrapper[4925]: E0121 10:58:27.789206 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:28.289188989 +0000 UTC m=+199.893080923 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.796639 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" podStartSLOduration=159.796610075 podStartE2EDuration="2m39.796610075s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:27.795985604 +0000 UTC m=+199.399877548" watchObservedRunningTime="2026-01-21 10:58:27.796610075 +0000 UTC m=+199.400502019" Jan 21 10:58:27 crc kubenswrapper[4925]: I0121 10:58:27.799123 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-28hkz" podStartSLOduration=160.799108047 podStartE2EDuration="2m40.799108047s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:26.422835614 +0000 UTC m=+198.026727578" watchObservedRunningTime="2026-01-21 10:58:27.799108047 +0000 UTC m=+199.402999981" Jan 21 10:58:28 crc kubenswrapper[4925]: I0121 10:58:28.098051 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:28 crc kubenswrapper[4925]: I0121 10:58:28.105535 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:28 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:28 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:28 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:28 crc kubenswrapper[4925]: I0121 10:58:28.105597 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:28 crc kubenswrapper[4925]: E0121 10:58:28.107164 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:28.607113185 +0000 UTC m=+200.211005119 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:28 crc kubenswrapper[4925]: I0121 10:58:28.107485 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:28 crc kubenswrapper[4925]: E0121 10:58:28.107888 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:28.60787851 +0000 UTC m=+200.211770444 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:28.847999 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:28.851263 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:29.85120641 +0000 UTC m=+201.455098344 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:28.887471 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:29 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:29 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:29 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:28.887659 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:28.955957 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.033528 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:29.533494685 +0000 UTC m=+201.137386619 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.136360 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.165504 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:29.665479863 +0000 UTC m=+201.269371797 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.422323 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.443742 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:29.943698533 +0000 UTC m=+201.547590467 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.476548 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.477558 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:29.977532105 +0000 UTC m=+201.581424039 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.603193 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.603551 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:30.103522884 +0000 UTC m=+201.707414818 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.604256 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.604712 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:30.104703753 +0000 UTC m=+201.708595687 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.774926 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.775463 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:30.275372485 +0000 UTC m=+201.879264419 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.877820 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.878671 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:30.37864405 +0000 UTC m=+201.982535984 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.981035 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:29 crc kubenswrapper[4925]: E0121 10:58:29.982639 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:30.482607699 +0000 UTC m=+202.086499633 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.987689 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:29 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:29 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:29 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:29 crc kubenswrapper[4925]: I0121 10:58:29.987788 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.427503 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:30 crc kubenswrapper[4925]: E0121 10:58:30.428701 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:30.928679516 +0000 UTC m=+202.532571450 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.502961 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.547956 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.557464 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49" event={"ID":"dffa6415-1a36-41b0-9919-a04bea0bdff8","Type":"ContainerStarted","Data":"93fb47ad754e5e8c158f6d47249ddbca3a614e968e66274f3ceb810ac9ae05ce"} Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.561918 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.566184 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" event={"ID":"ee785c06-3ec0-4917-a762-a5a8c178b95a","Type":"ContainerStarted","Data":"7b566d6b5ae2343813c4f7835491ac1836c190bb29cae139fef0f7e3ff6ebd4b"} Jan 21 10:58:30 crc kubenswrapper[4925]: E0121 10:58:30.584178 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:31.084122842 +0000 UTC m=+202.688014776 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.639144 4925 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-krz9k container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.639509 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" podUID="b22e36ec-37f7-4d2c-87fb-ce56d5436a8d" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.648810 4925 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-z9nt4 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.648966 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" podUID="c98e3838-5bee-44ee-8fca-b5b429cef61e" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.655909 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:30 crc kubenswrapper[4925]: E0121 10:58:30.662753 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:31.16273052 +0000 UTC m=+202.766622444 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.703351 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.704945 4925 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-5l4bq container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.704996 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" podUID="be085cb8-85ab-409e-a8cb-3d02cd7153f6" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.768903 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:30 crc kubenswrapper[4925]: E0121 10:58:30.779863 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:31.279826944 +0000 UTC m=+202.883718888 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.797589 4925 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8ht27 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.797701 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.911709 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:30 crc kubenswrapper[4925]: E0121 10:58:30.915525 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:31.415504425 +0000 UTC m=+203.019396359 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.975901 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:30 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:30 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:30 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:30 crc kubenswrapper[4925]: I0121 10:58:30.976038 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:31 crc kubenswrapper[4925]: I0121 10:58:31.023417 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:31 crc kubenswrapper[4925]: E0121 10:58:31.023821 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:31.523800628 +0000 UTC m=+203.127692562 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:31 crc kubenswrapper[4925]: I0121 10:58:31.162018 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:31 crc kubenswrapper[4925]: E0121 10:58:31.162614 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:31.662599303 +0000 UTC m=+203.266491237 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:31 crc kubenswrapper[4925]: I0121 10:58:31.267783 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:31 crc kubenswrapper[4925]: E0121 10:58:31.268261 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:31.768241697 +0000 UTC m=+203.372133631 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:31 crc kubenswrapper[4925]: I0121 10:58:31.408523 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:31 crc kubenswrapper[4925]: E0121 10:58:31.409315 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:31.909301256 +0000 UTC m=+203.513193190 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:31 crc kubenswrapper[4925]: I0121 10:58:31.606751 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:31 crc kubenswrapper[4925]: E0121 10:58:31.613235 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:32.11319363 +0000 UTC m=+203.717085564 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:31 crc kubenswrapper[4925]: I0121 10:58:31.613387 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:31 crc kubenswrapper[4925]: E0121 10:58:31.614078 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:32.114063268 +0000 UTC m=+203.717955202 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:31 crc kubenswrapper[4925]: I0121 10:58:31.715618 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:31 crc kubenswrapper[4925]: E0121 10:58:31.717249 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:32.217217771 +0000 UTC m=+203.821109705 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:31 crc kubenswrapper[4925]: I0121 10:58:31.819941 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:31 crc kubenswrapper[4925]: E0121 10:58:31.821620 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:32.321585722 +0000 UTC m=+203.925477656 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.005942 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:32 crc kubenswrapper[4925]: E0121 10:58:32.063370 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:32.563300451 +0000 UTC m=+204.167192395 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.113050 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 10:58:32 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld
Jan 21 10:58:32 crc kubenswrapper[4925]: [+]process-running ok
Jan 21 10:58:32 crc kubenswrapper[4925]: healthz check failed
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.113176 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.113524 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:32 crc kubenswrapper[4925]: E0121 10:58:32.114185 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:32.614158058 +0000 UTC m=+204.218049992 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.197426 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" event={"ID":"be500af9-e814-41db-be2a-e4f3fa9d46bb","Type":"ContainerStarted","Data":"c6f61e792c1b93688b291efaa65861f9c20674f6e3f1f5a998a32baf45d75916"}
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.219539 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:32 crc kubenswrapper[4925]: E0121 10:58:32.220098 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:32.720080102 +0000 UTC m=+204.323972036 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.358489 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:32 crc kubenswrapper[4925]: E0121 10:58:32.359003 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:32.85898519 +0000 UTC m=+204.462877124 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
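Every MountVolume.MountDevice and UnmountVolume.TearDown attempt in the records above fails for the same underlying reason: the kubelet cannot find kubevirt.io.hostpath-provisioner in its table of registered CSI plugins, so it cannot construct a client to the driver at all, and the volume manager requeues both operations. A minimal sketch of that lookup, assuming a simplified in-memory registry (the identifiers below are illustrative; this is not the kubelet's actual csi_plugin.go):

    package main

    import (
        "fmt"
        "sync"
    )

    // csiRegistry stands in for the kubelet's table of node plugins; an
    // entry appears only after the driver registers over the kubelet's
    // plugin-registration socket.
    type csiRegistry struct {
        mu      sync.RWMutex
        drivers map[string]string // driver name -> unix socket endpoint (assumed shape)
    }

    func (r *csiRegistry) newClient(name string) (string, error) {
        r.mu.RLock()
        defer r.mu.RUnlock()
        ep, ok := r.drivers[name]
        if !ok {
            // The failure mode repeated throughout the log above.
            return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
        }
        return ep, nil
    }

    func main() {
        reg := &csiRegistry{drivers: map[string]string{}} // driver not yet registered
        if _, err := reg.newClient("kubevirt.io.hostpath-provisioner"); err != nil {
            fmt.Println("MountDevice/TearDown would fail:", err)
        }
    }

Once the driver's node plugin registers with the kubelet, the same queued operations can succeed on a later retry without any other change.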
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.359721 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" event={"ID":"e265c5ca-d8d4-4ba0-81db-fd48d3974762","Type":"ContainerStarted","Data":"8f2ac1f9c0d185676c4fe226a89692d6963314815e3569e4b1ccafa3ec06ae90"}
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.548788 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:32 crc kubenswrapper[4925]: E0121 10:58:32.550258 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.050217808 +0000 UTC m=+204.654109742 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.560441 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" event={"ID":"c93dc177-affe-4232-9b28-fd8006418818","Type":"ContainerStarted","Data":"a35862157242668a58ee82058410788858c6dc59755e10a2ef0bfb9b725f61d0"}
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.567300 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vz4nw" event={"ID":"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6","Type":"ContainerStarted","Data":"8327ac029ce854c8ba52a79340c9777cc569f2371808519ed3357ec90a2ee42f"}
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.850813 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:32 crc kubenswrapper[4925]: E0121 10:58:32.854271 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.354246798 +0000 UTC m=+204.958138732 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.915481 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" event={"ID":"b22e36ec-37f7-4d2c-87fb-ce56d5436a8d","Type":"ContainerStarted","Data":"a9db720f14aa04f497c92928d38f66c1038b7489afa683d8b9947772ecda3c70"}
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.918582 4925 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-krz9k container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body=
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.918666 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" podUID="b22e36ec-37f7-4d2c-87fb-ce56d5436a8d" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused"
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.933239 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 10:58:32 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld
Jan 21 10:58:32 crc kubenswrapper[4925]: [+]process-running ok
Jan 21 10:58:32 crc kubenswrapper[4925]: healthz check failed
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.933348 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 10:58:32 crc kubenswrapper[4925]: I0121 10:58:32.953920 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:32 crc kubenswrapper[4925]: E0121 10:58:32.956270 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.456243052 +0000 UTC m=+205.060134986 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.023848 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" event={"ID":"5cb8f784-0f6a-43c7-a37a-d7f65668af7a","Type":"ContainerStarted","Data":"a1bd7dc1b0abe6430d56dd379bd956773cf579cf387a0cf3987dd0e2770e515c"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.057101 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.061750 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.56171594 +0000 UTC m=+205.165607874 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.112805 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" event={"ID":"68968bee-6187-43fa-bad4-ab1eb83e9c68","Type":"ContainerStarted","Data":"ab374bbeec044a9763397c48f5c4e9f1abbe5b26276b693babac0512431d3c99"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.113842 4925 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8ht27 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.113919 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused"
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.158811 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.158996 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.658971297 +0000 UTC m=+205.262863241 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.159528 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.161597 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.661580344 +0000 UTC m=+205.265472278 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.189578 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-85pbp" event={"ID":"0bccbd0b-3782-4ad2-bdd5-f21e5109165d","Type":"ContainerStarted","Data":"ecb602f7e181a732e7b340cbb7cee9c9ff8c0c2918c5ad405ce56e7c6424fb96"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.193160 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" event={"ID":"c98e3838-5bee-44ee-8fca-b5b429cef61e","Type":"ContainerStarted","Data":"caa964fd726d918b9129bf3650a7945e4b4774b9a2e99286fd2e5bdc42dfd238"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.196420 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" event={"ID":"5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605","Type":"ContainerStarted","Data":"489165caa5330728e4cdcf4a71d617ecc00c9eebc35fb283711ad4df984dd8db"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.197800 4925 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-z9nt4 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body=
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.197860 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" podUID="c98e3838-5bee-44ee-8fca-b5b429cef61e" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused"
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.204968 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" event={"ID":"4d61279b-b5c2-440f-9bac-689a27484f8c","Type":"ContainerStarted","Data":"f11e9638a39a1114499d05a28a9c02115ccc152ccd9047e1b8b31706ada40184"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.205191 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn"
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.208647 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" event={"ID":"5dccce36-49ce-4eea-ac64-60faf9ba2e04","Type":"ContainerStarted","Data":"ff4e1dd5e9f252152702afcfd61261325f399629ce31e323b95d632ff642ca8b"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.301315 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.301507 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.801459034 +0000 UTC m=+205.405350968 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.303419 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.303901 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.803880714 +0000 UTC m=+205.407772858 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.306199 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" event={"ID":"434007d9-38af-49cd-a16f-09c87531b8c1","Type":"ContainerStarted","Data":"55bff7098d826805217d3399d59dfee8375648d701da03f1ee342fc90e5f6911"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.340288 4925 generic.go:334] "Generic (PLEG): container finished" podID="3dddbbd4-eb3f-436d-8c53-cf413cecca31" containerID="3cfed7a84f5fb842381d475fc3edb4c0c0e0d2dee10490bc8c6de8b86921976b" exitCode=0
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.340412 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" event={"ID":"3dddbbd4-eb3f-436d-8c53-cf413cecca31","Type":"ContainerDied","Data":"3cfed7a84f5fb842381d475fc3edb4c0c0e0d2dee10490bc8c6de8b86921976b"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.350994 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" event={"ID":"6405bedd-bfe2-411b-937d-8f309fc6d0e8","Type":"ContainerStarted","Data":"14b7333b3b4d6c7c294c30e0013c775873407b1f26dc71d8f9f2592e7146fc57"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.421663 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.425138 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:33.925114736 +0000 UTC m=+205.529006670 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.437090 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" event={"ID":"dfbafd02-2fcf-4a25-a454-ade91c336036","Type":"ContainerStarted","Data":"2c772c7e0991eb1f38062913cb0a937f907d99b0ae6efcfd2a75050156f7948f"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.464989 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" event={"ID":"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e","Type":"ContainerStarted","Data":"7fcb2e1933a8697375688726e49d08bb273801f99821e81870bfcd3b6b2bbba0"}
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.543215 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.546964 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:34.046943447 +0000 UTC m=+205.650835381 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.645158 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.646482 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:34.146452577 +0000 UTC m=+205.750344521 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.882766 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:33 crc kubenswrapper[4925]: E0121 10:58:33.883253 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:34.383239192 +0000 UTC m=+205.987131116 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:33 crc kubenswrapper[4925]: I0121 10:58:33.883342 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx"
Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:33.883533 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx"
Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.553053 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 10:58:34 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld
Jan 21 10:58:34 crc kubenswrapper[4925]: [+]process-running ok
Jan 21 10:58:34 crc kubenswrapper[4925]: healthz check failed
Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.553478 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.560519 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-7lrsj"
Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.560588 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-7lrsj"
Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.562276 4925 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8ht27 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
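The probe failures interleaved with the volume retries (router, catalog-operator, olm-operator, marketplace-operator, console, downloads) all follow one pattern: the kubelet probes an HTTP endpoint on the container, and while the process is still starting the request is either refused outright or answered with a 500. A minimal sketch of such a check, assuming a plain GET with a short timeout (endpoint and timeout below are illustrative; the real prober also applies per-spec headers, timeouts, and failure thresholds):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probeHTTP treats any transport error or a status outside 200-399 as
    // a probe failure, mirroring the failures recorded above.
    func probeHTTP(url string) error {
        client := &http.Client{Timeout: time.Second} // assumed 1s timeout
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. "connect: connection refused" during startup
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        // Endpoint taken from the marketplace-operator records above.
        if err := probeHTTP("http://10.217.0.17:8080/healthz"); err != nil {
            fmt.Println("Probe failed:", err)
        }
    }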
\"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.562459 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.562632 4925 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8ht27 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.562730 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.566502 4925 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-dnjzx container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.566601 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" podUID="5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.566915 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.679962 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.680255 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.693964 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.700489 4925 patch_prober.go:28] interesting pod/console-f9d7485db-7lrsj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: 
connect: connection refused" start-of-body= Jan 21 10:58:34 crc kubenswrapper[4925]: I0121 10:58:34.700608 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7lrsj" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 21 10:58:35 crc kubenswrapper[4925]: I0121 10:58:34.710289 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:35 crc kubenswrapper[4925]: I0121 10:58:35.154888 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:35 crc kubenswrapper[4925]: I0121 10:58:35.153339 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:35 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:35 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:35 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:35 crc kubenswrapper[4925]: I0121 10:58:35.155449 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:35 crc kubenswrapper[4925]: E0121 10:58:35.325438 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:36.325300119 +0000 UTC m=+207.929192053 (durationBeforeRetry 1s). 
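Note the durationBeforeRetry in the last record above: it has grown from the 500ms of the earlier attempts to 1s, consistent with an exponential backoff on the repeatedly failing volume operation. A sketch of that cadence, assuming a doubling policy with an upper bound (the cap below is an assumption, not taken from the kubelet's code):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Start at the 500ms seen in the earlier records and double on
        // each consecutive failure up to an assumed cap.
        backoff := 500 * time.Millisecond
        maxBackoff := 2*time.Minute + 2*time.Second // assumption
        for attempt := 1; attempt <= 5; attempt++ {
            fmt.Printf("attempt %d failed; no retries permitted for %v\n", attempt, backoff)
            backoff *= 2
            if backoff > maxBackoff {
                backoff = maxBackoff
            }
        }
    }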
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.824316 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.857797 4925 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-nrk92 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.857893 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" podUID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.859298 4925 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-5l4bq container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.859325 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" podUID="be085cb8-85ab-409e-a8cb-3d02cd7153f6" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.859569 4925 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-5l4bq container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.859590 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq" podUID="be085cb8-85ab-409e-a8cb-3d02cd7153f6" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.859658 4925 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vwhv9 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.20:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while 
awaiting headers)" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:35.859699 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.20:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.031368 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:36 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:36 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:36 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.032028 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.198469 4925 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-krz9k container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.198581 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" podUID="b22e36ec-37f7-4d2c-87fb-ce56d5436a8d" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.198731 4925 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-krz9k container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.198758 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" podUID="b22e36ec-37f7-4d2c-87fb-ce56d5436a8d" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.198847 4925 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-z9nt4 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.198872 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" podUID="c98e3838-5bee-44ee-8fca-b5b429cef61e" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 
10.217.0.40:8443: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.198952 4925 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-z9nt4 container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.198970 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" podUID="c98e3838-5bee-44ee-8fca-b5b429cef61e" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.247286 4925 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-z9nt4 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.247706 4925 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-krz9k container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.247800 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" podUID="b22e36ec-37f7-4d2c-87fb-ce56d5436a8d" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.247766 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" podUID="c98e3838-5bee-44ee-8fca-b5b429cef61e" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.253372 4925 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8ht27 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.253573 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.328469 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.332715 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vz4nw" event={"ID":"6b7fdcdd-7176-41a4-a4a2-3a2db9c672c6","Type":"ContainerStarted","Data":"3062ae9746fd13801bb5286eedcf247d20011d2a12a37b3ae2965b8b8bc3a7e9"} Jan 
21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.332965 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" event={"ID":"4d61279b-b5c2-440f-9bac-689a27484f8c","Type":"ContainerStarted","Data":"9d09f396a8664a5a33ce19f5a488286190e9729e7c8e9c6ea85700b6fe1d9064"} Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.333046 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" event={"ID":"b97d7bcf-6463-4a55-a0f6-ebe0f3bda8e9","Type":"ContainerStarted","Data":"2b4fb1d924a83aa35eb7991a700f3586216463ad5f21a18b6c17ada7d711cc41"} Jan 21 10:58:36 crc kubenswrapper[4925]: E0121 10:58:36.328803 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:36.82861982 +0000 UTC m=+208.432511764 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.355037 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:36 crc kubenswrapper[4925]: E0121 10:58:36.358425 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:36.858382548 +0000 UTC m=+208.462274482 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.456747 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:36 crc kubenswrapper[4925]: E0121 10:58:36.459526 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-21 10:58:36.959501673 +0000 UTC m=+208.563393607 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.681515 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.682907 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.685146 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:36 crc kubenswrapper[4925]: E0121 10:58:36.688111 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:37.188020743 +0000 UTC m=+208.791912677 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.697861 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.709589 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.729490 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.790002 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.790419 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da6cdc16-92f1-4475-99f2-c087b77d76cf-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"da6cdc16-92f1-4475-99f2-c087b77d76cf\") " 
pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:36 crc kubenswrapper[4925]: I0121 10:58:36.790491 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da6cdc16-92f1-4475-99f2-c087b77d76cf-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"da6cdc16-92f1-4475-99f2-c087b77d76cf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:36 crc kubenswrapper[4925]: E0121 10:58:36.790723 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:37.290677788 +0000 UTC m=+208.894569902 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.076710 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da6cdc16-92f1-4475-99f2-c087b77d76cf-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"da6cdc16-92f1-4475-99f2-c087b77d76cf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.077891 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da6cdc16-92f1-4475-99f2-c087b77d76cf-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"da6cdc16-92f1-4475-99f2-c087b77d76cf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.078138 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:37 crc kubenswrapper[4925]: E0121 10:58:37.078829 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:37.578811276 +0000 UTC m=+209.182703220 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.079539 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da6cdc16-92f1-4475-99f2-c087b77d76cf-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"da6cdc16-92f1-4475-99f2-c087b77d76cf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.095753 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:37 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:37 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:37 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.095866 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.314854 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:37 crc kubenswrapper[4925]: E0121 10:58:37.315748 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:37.815697184 +0000 UTC m=+209.419589118 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.315985 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:37 crc kubenswrapper[4925]: E0121 10:58:37.316619 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:37.816605335 +0000 UTC m=+209.420497269 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.418235 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:37 crc kubenswrapper[4925]: E0121 10:58:37.419078 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:37.919044272 +0000 UTC m=+209.522936206 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.433887 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4kbqk" podStartSLOduration=170.433859764 podStartE2EDuration="2m50.433859764s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:37.412176324 +0000 UTC m=+209.016068278" watchObservedRunningTime="2026-01-21 10:58:37.433859764 +0000 UTC m=+209.037751698" Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.434865 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" event={"ID":"bebe6bc4-7b86-4688-ab28-408d5fc1ed7e","Type":"ContainerStarted","Data":"2264c54c06457c698183f11efca5b44ff507098b9940ea806d3436c90a0f22c3"} Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.470365 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da6cdc16-92f1-4475-99f2-c087b77d76cf-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"da6cdc16-92f1-4475-99f2-c087b77d76cf\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.480972 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-bhfrw" podStartSLOduration=169.480951856 podStartE2EDuration="2m49.480951856s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:37.478840576 +0000 UTC m=+209.082732510" watchObservedRunningTime="2026-01-21 10:58:37.480951856 +0000 UTC m=+209.084843790" Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.481649 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" event={"ID":"3dddbbd4-eb3f-436d-8c53-cf413cecca31","Type":"ContainerStarted","Data":"689b9bc277c933b15af7ad088f985c71a77a83b8ef235ddcc71e976ffaca6cdb"} Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.534258 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:37 crc kubenswrapper[4925]: E0121 10:58:37.534795 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:38.034777522 +0000 UTC m=+209.638669456 (durationBeforeRetry 500ms). 
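
The pod_startup_latency_tracker lines compute podStartSLOduration as observed running time minus podCreationTimestamp; because firstStartedPulling/lastFinishedPulling are the zero time here, no image-pull window is subtracted and the SLO duration equals the E2E duration. Reproducing the ingress-operator numbers above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps copied from the ingress-operator entry above.
        created, _ := time.Parse(time.RFC3339, "2026-01-21T10:55:47Z")
        observed, _ := time.Parse(time.RFC3339Nano, "2026-01-21T10:58:37.433859764Z")

        // With no image pull recorded (pull timestamps are the zero time),
        // the SLO duration is simply observed-running minus creation.
        slo := observed.Sub(created)
        fmt.Println(slo)           // 2m50.433859764s
        fmt.Println(slo.Seconds()) // 170.433859764
    }
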
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:37 crc kubenswrapper[4925]: I0121 10:58:37.626526 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" event={"ID":"15dcf9e9-44e8-4662-9f3d-6cef771808c5","Type":"ContainerStarted","Data":"f1f605a07befd7eeea4d088f5b4e849611e1479b1ec90c6ecda55ab3f777962d"} Jan 21 10:58:38 crc kubenswrapper[4925]: I0121 10:58:37.634800 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:38 crc kubenswrapper[4925]: E0121 10:58:37.635346 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:38.135327777 +0000 UTC m=+209.739219711 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:38 crc kubenswrapper[4925]: I0121 10:58:37.658292 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" event={"ID":"e0376dda-f02a-464e-ae41-18d6fddd7097","Type":"ContainerStarted","Data":"27f0768b374beb03714cf064401ce9c3898a2acd7280b81b79eb80151527e1ac"} Jan 21 10:58:38 crc kubenswrapper[4925]: I0121 10:58:38.272130 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:38 crc kubenswrapper[4925]: I0121 10:58:38.292663 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" podStartSLOduration=171.292609441 podStartE2EDuration="2m51.292609441s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:37.604287008 +0000 UTC m=+209.208178942" watchObservedRunningTime="2026-01-21 10:58:38.292609441 +0000 UTC m=+209.896501375" Jan 21 10:58:38 crc kubenswrapper[4925]: I0121 10:58:38.302190 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:38 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:38 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:38 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:38 crc kubenswrapper[4925]: I0121 10:58:38.322210 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:38 crc kubenswrapper[4925]: I0121 10:58:38.352231 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:38 crc kubenswrapper[4925]: E0121 10:58:38.387285 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:39.387227349 +0000 UTC m=+210.991119293 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:38.575362 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:39 crc kubenswrapper[4925]: E0121 10:58:38.576652 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-21 10:58:39.076607331 +0000 UTC m=+210.680499265 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:38.712173 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:39 crc kubenswrapper[4925]: E0121 10:58:38.725226 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:39.22517728 +0000 UTC m=+210.829069224 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.674636 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.839491 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:39 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:39 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:39 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.839572 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:39 crc kubenswrapper[4925]: E0121 10:58:39.841027 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:40.840906201 +0000 UTC m=+212.444798145 (durationBeforeRetry 1s). 
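
The durationBeforeRetry values come from per-operation exponential backoff in nestedpendingoperations: 500ms on the first failure, doubling on repeated failures of the same operation (hence the 1s entries above). A minimal sketch of that schedule; the exact cap lives in kubelet's backoff code and the 2m value below is only an assumption:

    package main

    import (
        "fmt"
        "time"
    )

    // nextRetry sketches per-operation exponential backoff in the style of
    // the durationBeforeRetry values above: 500ms initially, doubling per
    // failure. The 2m cap is an assumption, not quoted from kubelet source.
    func nextRetry(last time.Duration) time.Duration {
        const (
            initial  = 500 * time.Millisecond
            maxDelay = 2 * time.Minute
        )
        if last == 0 {
            return initial
        }
        if next := last * 2; next < maxDelay {
            return next
        }
        return maxDelay
    }

    func main() {
        var d time.Duration
        for i := 0; i < 4; i++ {
            d = nextRetry(d)
            fmt.Println(d) // 500ms, 1s, 2s, 4s
        }
    }
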
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.841658 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:39 crc kubenswrapper[4925]: E0121 10:58:39.844139 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:40.343984724 +0000 UTC m=+211.947876658 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.920789 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-vz4nw" podStartSLOduration=27.92075433 podStartE2EDuration="27.92075433s" podCreationTimestamp="2026-01-21 10:58:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:39.904572933 +0000 UTC m=+211.508464887" watchObservedRunningTime="2026-01-21 10:58:39.92075433 +0000 UTC m=+211.524646274" Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.960093 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:39 crc kubenswrapper[4925]: E0121 10:58:39.961957 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:40.461913226 +0000 UTC m=+212.065805160 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.962930 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:39 crc kubenswrapper[4925]: E0121 10:58:39.963642 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:40.463629042 +0000 UTC m=+212.067520976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.978935 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:39 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:39 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:39 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:39 crc kubenswrapper[4925]: I0121 10:58:39.979020 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.002589 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.003033 4925 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-9fbhr container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.003098 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" podUID="3dddbbd4-eb3f-436d-8c53-cf413cecca31" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 21 10:58:40 
crc kubenswrapper[4925]: I0121 10:58:40.068490 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:40 crc kubenswrapper[4925]: E0121 10:58:40.078807 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:40.57872773 +0000 UTC m=+212.182619844 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.539019 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:40 crc kubenswrapper[4925]: E0121 10:58:40.540158 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:41.04013879 +0000 UTC m=+212.644030724 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.663541 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jb9kj" podStartSLOduration=173.663498248 podStartE2EDuration="2m53.663498248s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:40.663031683 +0000 UTC m=+212.266923637" watchObservedRunningTime="2026-01-21 10:58:40.663498248 +0000 UTC m=+212.267390182" Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.674882 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:40 crc kubenswrapper[4925]: E0121 10:58:40.675654 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:41.17560299 +0000 UTC m=+212.779495104 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.675996 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:40 crc kubenswrapper[4925]: E0121 10:58:40.676667 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:41.176655575 +0000 UTC m=+212.780547519 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.791482 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:40 crc kubenswrapper[4925]: E0121 10:58:40.791858 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:41.291838866 +0000 UTC m=+212.895730800 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.884338 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k" podStartSLOduration=172.884307953 podStartE2EDuration="2m52.884307953s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:40.88089924 +0000 UTC m=+212.484791204" watchObservedRunningTime="2026-01-21 10:58:40.884307953 +0000 UTC m=+212.488199907" Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.886655 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:40 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:40 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:40 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.886747 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:40 crc kubenswrapper[4925]: I0121 10:58:40.898819 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:40 crc kubenswrapper[4925]: E0121 10:58:40.899630 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:41.399614991 +0000 UTC m=+213.003506925 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.002272 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:41 crc kubenswrapper[4925]: E0121 10:58:41.009873 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:41.509840007 +0000 UTC m=+213.113731941 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.123767 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:41 crc kubenswrapper[4925]: E0121 10:58:41.124516 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:41.624430088 +0000 UTC m=+213.228322022 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.160281 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" event={"ID":"e0376dda-f02a-464e-ae41-18d6fddd7097","Type":"ContainerStarted","Data":"a9594a67d3def20f537448591a207f33d0625975b89e8364b2fc9b0c463b6ecd"} Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.160484 4925 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-9fbhr container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.160527 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" podUID="3dddbbd4-eb3f-436d-8c53-cf413cecca31" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.161288 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podStartSLOduration=173.16125124 podStartE2EDuration="2m53.16125124s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:41.160792294 +0000 UTC m=+212.764684238" watchObservedRunningTime="2026-01-21 10:58:41.16125124 +0000 UTC m=+212.765143174" Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.230613 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:41 crc kubenswrapper[4925]: E0121 10:58:41.231275 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:41.731242312 +0000 UTC m=+213.335134246 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.305090 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-vz4nw" Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.656311 4925 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-9fbhr container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.656377 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" podUID="3dddbbd4-eb3f-436d-8c53-cf413cecca31" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.657049 4925 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-9fbhr container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.657072 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" podUID="3dddbbd4-eb3f-436d-8c53-cf413cecca31" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.657321 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:41 crc kubenswrapper[4925]: E0121 10:58:41.658187 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:42.158161773 +0000 UTC m=+213.762053707 (durationBeforeRetry 500ms). 
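
The config-operator probes above fail with "connection refused" (nothing is listening on 10.217.0.11:8443 yet), a different failure mode from the router's "statuscode: 500" (listening but reporting unhealthy). Both liveness and readiness reduce to the same HTTP GET, and kubelet counts 2xx/3xx responses as success. A rough stand-in for that check; the skip-verify TLS config is an assumption for a self-signed healthz endpoint, not kubelet's actual prober code:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    // probe treats dial errors (like the "connection refused" above) and
    // non-2xx/3xx status codes (like the router's 500) as failures.
    func probe(url string, timeout time.Duration) error {
        client := &http.Client{
            Timeout: timeout,
            Transport: &http.Transport{
                TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
            },
        }
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. dial tcp 10.217.0.11:8443: connect: connection refused
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        fmt.Println(probe("https://10.217.0.11:8443/healthz", time.Second))
    }
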
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.784842 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:41 crc kubenswrapper[4925]: E0121 10:58:41.785059 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:42.285018501 +0000 UTC m=+213.888910435 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.785752 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:41 crc kubenswrapper[4925]: E0121 10:58:41.786189 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:42.28617407 +0000 UTC m=+213.890066004 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.867887 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" podStartSLOduration=173.86786157 podStartE2EDuration="2m53.86786157s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:41.86607064 +0000 UTC m=+213.469962574" watchObservedRunningTime="2026-01-21 10:58:41.86786157 +0000 UTC m=+213.471753504" Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.887228 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:41 crc kubenswrapper[4925]: E0121 10:58:41.887800 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:42.387781311 +0000 UTC m=+213.991673245 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.896706 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:41 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:41 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:41 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:41 crc kubenswrapper[4925]: I0121 10:58:41.896789 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.050536 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:42 crc kubenswrapper[4925]: E0121 10:58:42.052133 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:42.552105512 +0000 UTC m=+214.155997446 (durationBeforeRetry 500ms). 
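
The router's startup-probe body follows the Kubernetes healthz convention visible throughout this log: one [+]/[-] line per named check, reasons withheld at default verbosity, and an overall 500 when any check fails. A self-contained handler producing the same shape (check names copied from the router output above; the check implementations are placeholders):

    package main

    import (
        "fmt"
        "net/http"
    )

    // Named checks in the style of the router's healthz output above.
    var checks = []struct {
        name string
        fn   func() error
    }{
        {"backend-http", func() error { return fmt.Errorf("not ready") }},
        {"has-synced", func() error { return fmt.Errorf("not ready") }},
        {"process-running", func() error { return nil }},
    }

    // healthz writes one [+]/[-] line per check and returns 500 if any
    // check fails, matching the probe body logged above.
    func healthz(w http.ResponseWriter, r *http.Request) {
        failed := false
        body := ""
        for _, c := range checks {
            if err := c.fn(); err != nil {
                failed = true
                body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
            } else {
                body += fmt.Sprintf("[+]%s ok\n", c.name)
            }
        }
        if failed {
            w.WriteHeader(http.StatusInternalServerError)
            body += "healthz check failed\n"
        }
        fmt.Fprint(w, body)
    }

    func main() {
        http.HandleFunc("/healthz", healthz)
        _ = http.ListenAndServe(":8080", nil)
    }

The oauth-apiserver readyz output further down ([+]ping ok ... [-]informer-sync failed) is the same convention with a longer check list.
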
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.061761 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gqsng" podStartSLOduration=176.061734621 podStartE2EDuration="2m56.061734621s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:42.061490383 +0000 UTC m=+213.665382317" watchObservedRunningTime="2026-01-21 10:58:42.061734621 +0000 UTC m=+213.665626555" Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.122193 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jqrcn" podStartSLOduration=175.122155915 podStartE2EDuration="2m55.122155915s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:42.108447921 +0000 UTC m=+213.712339855" watchObservedRunningTime="2026-01-21 10:58:42.122155915 +0000 UTC m=+213.726047839" Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.154063 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:42 crc kubenswrapper[4925]: E0121 10:58:42.154519 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:42.654498088 +0000 UTC m=+214.258390022 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.289533 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:42 crc kubenswrapper[4925]: E0121 10:58:42.358615 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:42.858581579 +0000 UTC m=+214.462473523 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.392217 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 10:58:42 crc kubenswrapper[4925]: E0121 10:58:42.392708 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:42.892686629 +0000 UTC m=+214.496578563 (durationBeforeRetry 500ms). 
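
The alternating "UnmountVolume started"/"MountVolume started" pairs are the volume manager's reconciler diffing its desired state (the new image-registry pod 0770d392... should have the PVC) against the actual state (the PVC is still mounted for the deleted pod UID 8f668bae...) on every pass; each difference spawns an operation, and the backoff above rate-limits the retries. A toy version of that diff, under the simplifying assumption of one pod per volume:

    package main

    import "fmt"

    // reconcile is a toy desired/actual state-of-world pass: unmount
    // volumes no pod wants anymore, mount volumes a pod wants but does not
    // have. Real kubelet code additionally tracks per-operation backoff
    // (the retry spacing seen throughout this log).
    func reconcile(desired, actual map[string]string) {
        for vol, pod := range actual {
            if desired[vol] != pod {
                fmt.Printf("UnmountVolume started for volume %q pod %q\n", vol, pod)
            }
        }
        for vol, pod := range desired {
            if actual[vol] != pod {
                fmt.Printf("MountVolume started for volume %q pod %q\n", vol, pod)
            }
        }
    }

    func main() {
        vol := "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
        actual := map[string]string{vol: "8f668bae-612b-4b75-9490-919e737c6a3b"}  // deleted pod
        desired := map[string]string{vol: "0770d392-cbe7-4049-aa81-46d3892bc4a9"} // image-registry pod
        reconcile(desired, actual) // one unmount and one mount per pass, as in the log
    }
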
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.547881 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:42 crc kubenswrapper[4925]: E0121 10:58:42.548643 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:43.048611502 +0000 UTC m=+214.652503436 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.554903 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx"
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.590574 4925 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-dnjzx container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]log ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]etcd ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]etcd-readiness ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [-]informer-sync failed: reason withheld
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]poststarthook/max-in-flight-filter ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-StartUserInformer ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-StartOAuthInformer ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]shutdown ok
Jan 21 10:58:42 crc kubenswrapper[4925]: readyz check failed
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.590776 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" podUID="5ce9d1cf-7d42-4a6a-91ce-3a1d0afd4605" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.707682 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:42 crc kubenswrapper[4925]: E0121 10:58:42.710224 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:43.210176401 +0000 UTC m=+214.814068385 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.777438 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx" podStartSLOduration=174.777383201 podStartE2EDuration="2m54.777383201s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:42.562644328 +0000 UTC m=+214.166536382" watchObservedRunningTime="2026-01-21 10:58:42.777383201 +0000 UTC m=+214.381275135"
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.777771 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hfk49" podStartSLOduration=175.777762094 podStartE2EDuration="2m55.777762094s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:42.758164133 +0000 UTC m=+214.362056067" watchObservedRunningTime="2026-01-21 10:58:42.777762094 +0000 UTC m=+214.381654028"
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.864894 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:42 crc kubenswrapper[4925]: E0121 10:58:42.865817 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:43.365790553 +0000 UTC m=+214.969682487 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.959761 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 10:58:42 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld
Jan 21 10:58:42 crc kubenswrapper[4925]: [+]process-running ok
Jan 21 10:58:42 crc kubenswrapper[4925]: healthz check failed
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.959845 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 10:58:42 crc kubenswrapper[4925]: I0121 10:58:42.967852 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:42 crc kubenswrapper[4925]: E0121 10:58:42.968480 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:43.468422788 +0000 UTC m=+215.072314732 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.133588 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:43 crc kubenswrapper[4925]: E0121 10:58:43.134220 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:43.634199237 +0000 UTC m=+215.238091171 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.179106 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5ml9s" podStartSLOduration=176.179053445 podStartE2EDuration="2m56.179053445s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:42.994948868 +0000 UTC m=+214.598840802" watchObservedRunningTime="2026-01-21 10:58:43.179053445 +0000 UTC m=+214.782945379"
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.216087 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4" podStartSLOduration=175.216048882 podStartE2EDuration="2m55.216048882s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:43.183317057 +0000 UTC m=+214.787208991" watchObservedRunningTime="2026-01-21 10:58:43.216048882 +0000 UTC m=+214.819940826"
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.224105 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.236138 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:43 crc kubenswrapper[4925]: E0121 10:58:43.236852 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:43.736812981 +0000 UTC m=+215.340704915 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.255509 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-kk7wd" podStartSLOduration=176.25548196 podStartE2EDuration="2m56.25548196s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:43.254156307 +0000 UTC m=+214.858048241" watchObservedRunningTime="2026-01-21 10:58:43.25548196 +0000 UTC m=+214.859373894"
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.343335 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:43 crc kubenswrapper[4925]: E0121 10:58:43.344464 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:43.844446321 +0000 UTC m=+215.448338255 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.439831 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2d2sj" podStartSLOduration=176.439800974 podStartE2EDuration="2m56.439800974s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:43.342330311 +0000 UTC m=+214.946222255" watchObservedRunningTime="2026-01-21 10:58:43.439800974 +0000 UTC m=+215.043692908"
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.445444 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:43 crc kubenswrapper[4925]: E0121 10:58:43.446155 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:43.946126254 +0000 UTC m=+215.550018188 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.483673 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"da6cdc16-92f1-4475-99f2-c087b77d76cf","Type":"ContainerStarted","Data":"9cc568cc463b0ae72f04fdc9281c73c300fbe4bee8f5eebe0206dc2a76d6c0e1"}
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.483903 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ntfc2" podStartSLOduration=175.483870477 podStartE2EDuration="2m55.483870477s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:43.481833849 +0000 UTC m=+215.085725803" watchObservedRunningTime="2026-01-21 10:58:43.483870477 +0000 UTC m=+215.087762431"
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.553578 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:43 crc kubenswrapper[4925]: E0121 10:58:43.554071 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.054053745 +0000 UTC m=+215.657945669 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.748258 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:43 crc kubenswrapper[4925]: E0121 10:58:43.749014 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.248988421 +0000 UTC m=+215.852880355 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.851761 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dnjzx"
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.853635 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:43 crc kubenswrapper[4925]: E0121 10:58:43.854169 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.354143589 +0000 UTC m=+215.958035523 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.899384 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 10:58:43 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld
Jan 21 10:58:43 crc kubenswrapper[4925]: [+]process-running ok
Jan 21 10:58:43 crc kubenswrapper[4925]: healthz check failed
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.899894 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 10:58:43 crc kubenswrapper[4925]: I0121 10:58:43.902015 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-85pbp" podStartSLOduration=31.902002147 podStartE2EDuration="31.902002147s" podCreationTimestamp="2026-01-21 10:58:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:43.769355907 +0000 UTC m=+215.373247841" watchObservedRunningTime="2026-01-21 10:58:43.902002147 +0000 UTC m=+215.505894081"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.014455 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.015278 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.515251474 +0000 UTC m=+216.119143408 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.015347 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.017631 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.517603082 +0000 UTC m=+216.121495016 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.061483 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.062180 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.063936 4925 patch_prober.go:28] interesting pod/apiserver-76f77b778f-pxkk7 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.10:8443/livez\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.064108 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" podUID="e0376dda-f02a-464e-ae41-18d6fddd7097" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.10:8443/livez\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.064631 4925 patch_prober.go:28] interesting pod/console-f9d7485db-7lrsj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.064756 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7lrsj" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.117672 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.118535 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.618508019 +0000 UTC m=+216.222399963 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.193731 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body=
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.193870 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.194072 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body=
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.194176 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.216055 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.249838 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.250664 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.750639952 +0000 UTC m=+216.354531886 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.361377 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.363505 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.863482225 +0000 UTC m=+216.467374159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.438463 4925 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8ht27 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.438531 4925 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8ht27 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.438545 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.438608 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.438782 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.466532 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.467418 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:44.967376071 +0000 UTC m=+216.571268005 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.510076 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" event={"ID":"15dcf9e9-44e8-4662-9f3d-6cef771808c5","Type":"ContainerStarted","Data":"8eec425e3c96f6dd865a4da6202a1be965ea019122678292a5052bef5067e6bb"}
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.529161 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-k9srb" podStartSLOduration=176.52913166 podStartE2EDuration="2m56.52913166s" podCreationTimestamp="2026-01-21 10:55:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:44.528247061 +0000 UTC m=+216.132138995" watchObservedRunningTime="2026-01-21 10:58:44.52913166 +0000 UTC m=+216.133023604"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.628153 4925 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-9fbhr container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.628226 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" podUID="3dddbbd4-eb3f-436d-8c53-cf413cecca31" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.628343 4925 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-9fbhr container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.628364 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" podUID="3dddbbd4-eb3f-436d-8c53-cf413cecca31" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.671663 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.672885 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:45.172848818 +0000 UTC m=+216.776740802 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.673139 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.675912 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:45.175894159 +0000 UTC m=+216.779786303 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.733321 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" podStartSLOduration=178.733287392 podStartE2EDuration="2m58.733287392s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:44.725167443 +0000 UTC m=+216.329059407" watchObservedRunningTime="2026-01-21 10:58:44.733287392 +0000 UTC m=+216.337179326"
Jan 21 10:58:44 crc kubenswrapper[4925]: I0121 10:58:44.778205 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:44 crc kubenswrapper[4925]: E0121 10:58:44.778804 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:45.278772411 +0000 UTC m=+216.882664355 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.061409 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.062440 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:45.562419221 +0000 UTC m=+217.166311155 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.074245 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 10:58:45 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld
Jan 21 10:58:45 crc kubenswrapper[4925]: [+]process-running ok
Jan 21 10:58:45 crc kubenswrapper[4925]: healthz check failed
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.074336 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.196792 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.197346 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:45.697322446 +0000 UTC m=+217.301214380 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.298944 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.299623 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:45.799590498 +0000 UTC m=+217.403482442 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.433578 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.433988 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:45.933943765 +0000 UTC m=+217.537835719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.434157 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.434756 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:45.934740411 +0000 UTC m=+217.538632355 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.442239 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" podStartSLOduration=179.442206949 podStartE2EDuration="2m59.442206949s" podCreationTimestamp="2026-01-21 10:55:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:45.440301656 +0000 UTC m=+217.044193590" watchObservedRunningTime="2026-01-21 10:58:45.442206949 +0000 UTC m=+217.046098883"
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.516913 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"da6cdc16-92f1-4475-99f2-c087b77d76cf","Type":"ContainerStarted","Data":"00c07a5be7f901effc1f4d1eef550e4e0495263770ac9ea8b9ee4477c338fa20"}
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.572743 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.573937 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:46.073908308 +0000 UTC m=+217.677800252 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.676180 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.678504 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:46.178478827 +0000 UTC m=+217.782370761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.778048 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.778354 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:46.278296938 +0000 UTC m=+217.882188882 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.778839 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.780118 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:46.280096307 +0000 UTC m=+217.883988251 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.946830 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:45 crc kubenswrapper[4925]: E0121 10:58:45.947315 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:46.447294534 +0000 UTC m=+218.051186468 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.947513 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 10:58:45 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld
Jan 21 10:58:45 crc kubenswrapper[4925]: [+]process-running ok
Jan 21 10:58:45 crc kubenswrapper[4925]: healthz check failed
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.947564 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 10:58:45 crc kubenswrapper[4925]: I0121 10:58:45.953030 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5l4bq"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.048771 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:46 crc kubenswrapper[4925]: E0121 10:58:46.049388 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:46.54936954 +0000 UTC m=+218.153261474 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.163236 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.165013 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.279699 4925 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.454661 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:46 crc kubenswrapper[4925]: E0121 10:58:46.455850 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:46.955790792 +0000 UTC m=+218.559682726 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.463882 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.478299 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z9nt4"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.483981 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.487942 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-krz9k"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.560932 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.561023 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.561068 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:46 crc kubenswrapper[4925]: E0121 10:58:46.561645 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:47.061605102 +0000 UTC m=+218.665497036 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.570793 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" event={"ID":"15dcf9e9-44e8-4662-9f3d-6cef771808c5","Type":"ContainerStarted","Data":"4a721978ae1b00c8c5a639df35ccb21c4941c6f0f7608c29552e6a1dd11b072a"}
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.662961 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.663275 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.663357 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 10:58:46 crc kubenswrapper[4925]: E0121 10:58:46.664215 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 10:58:47.164193975 +0000 UTC m=+218.768085909 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.664755 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.755995 4925 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-21T10:58:46.279746952Z","Handler":null,"Name":""}
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.765092 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:46 crc kubenswrapper[4925]: E0121 10:58:46.765844 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 10:58:47.265809305 +0000 UTC m=+218.869701239 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-m7dl4" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.775062 4925 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.775149 4925 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.812931 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.853511 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.869262 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.916553 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 21 10:58:46 crc kubenswrapper[4925]: I0121 10:58:46.978004 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4"
Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.013104 4925 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.013162 4925 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.016230 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:47 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:47 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:47 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.016743 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.105812 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.641159 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.660303 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9fbhr" Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.695139 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-m7dl4\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.722224 4925 generic.go:334] "Generic (PLEG): container finished" podID="ee785c06-3ec0-4917-a762-a5a8c178b95a" containerID="7b566d6b5ae2343813c4f7835491ac1836c190bb29cae139fef0f7e3ff6ebd4b" exitCode=0 Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.722312 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" event={"ID":"ee785c06-3ec0-4917-a762-a5a8c178b95a","Type":"ContainerDied","Data":"7b566d6b5ae2343813c4f7835491ac1836c190bb29cae139fef0f7e3ff6ebd4b"} Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.757366 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x5pnh"] Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.759018 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:47 crc kubenswrapper[4925]: W0121 10:58:47.789825 4925 reflector.go:561] object-"openshift-marketplace"/"community-operators-dockercfg-dmngl": failed to list *v1.Secret: secrets "community-operators-dockercfg-dmngl" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object Jan 21 10:58:47 crc kubenswrapper[4925]: E0121 10:58:47.789916 4925 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"community-operators-dockercfg-dmngl\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"community-operators-dockercfg-dmngl\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 10:58:47 crc kubenswrapper[4925]: I0121 10:58:47.875049 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.876537 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-utilities\") pod \"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.876637 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kkp5\" (UniqueName: \"kubernetes.io/projected/970344f4-64f6-4ffc-9896-6dd169ca1553-kube-api-access-2kkp5\") pod \"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.876669 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-catalog-content\") pod \"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.884840 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:48 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:48 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:48 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.884935 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.997581 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kkp5\" (UniqueName: \"kubernetes.io/projected/970344f4-64f6-4ffc-9896-6dd169ca1553-kube-api-access-2kkp5\") pod 
\"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.997705 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-catalog-content\") pod \"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.997786 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-utilities\") pod \"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:47.999553 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-utilities\") pod \"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:48.000964 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-catalog-content\") pod \"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:48.233750 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=12.233697189 podStartE2EDuration="12.233697189s" podCreationTimestamp="2026-01-21 10:58:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:48.061827997 +0000 UTC m=+219.665719931" watchObservedRunningTime="2026-01-21 10:58:48.233697189 +0000 UTC m=+219.837589123" Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:48.936869 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p5std"] Jan 21 10:58:48 crc kubenswrapper[4925]: I0121 10:58:48.938411 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.077020 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-utilities\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.077111 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz8gh\" (UniqueName: \"kubernetes.io/projected/88c0c83d-a22b-4150-9572-ee68fb5f1e81-kube-api-access-xz8gh\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.077206 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-catalog-content\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.087909 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x5pnh"] Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.119061 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:49 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:49 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:49 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.119739 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.136299 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.136636 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.168590 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k9xnv"] Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.170273 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.174167 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d9qfn"] Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.175539 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.185950 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mg7xq\" (UniqueName: \"kubernetes.io/projected/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-kube-api-access-mg7xq\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.186053 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-utilities\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.186078 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-catalog-content\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.186100 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-utilities\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.186183 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45hql\" (UniqueName: \"kubernetes.io/projected/c59d1347-a48d-4337-a8d1-2e5bef1f4535-kube-api-access-45hql\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.186203 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz8gh\" (UniqueName: \"kubernetes.io/projected/88c0c83d-a22b-4150-9572-ee68fb5f1e81-kube-api-access-xz8gh\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.186232 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-catalog-content\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.186291 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-utilities\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.186310 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-catalog-content\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.187327 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-utilities\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.187741 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-catalog-content\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.489108 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-utilities\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.489172 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-catalog-content\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.489234 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mg7xq\" (UniqueName: \"kubernetes.io/projected/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-kube-api-access-mg7xq\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.489268 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-utilities\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.489288 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-catalog-content\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.489321 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45hql\" (UniqueName: \"kubernetes.io/projected/c59d1347-a48d-4337-a8d1-2e5bef1f4535-kube-api-access-45hql\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.490262 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-catalog-content\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.490444 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-utilities\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.491367 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-utilities\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.491788 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-catalog-content\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.512840 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kkp5\" (UniqueName: \"kubernetes.io/projected/970344f4-64f6-4ffc-9896-6dd169ca1553-kube-api-access-2kkp5\") pod \"community-operators-x5pnh\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.571909 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p5std"] Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.755133 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x5pnh" Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.925518 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:49 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:49 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:49 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:49 crc kubenswrapper[4925]: I0121 10:58:49.925625 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.151765 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.151848 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.202917 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" event={"ID":"15dcf9e9-44e8-4662-9f3d-6cef771808c5","Type":"ContainerStarted","Data":"7a522c9dafe822dfbaa2bb9b00a3c012a803db4601b2a603c5bf233679d16e69"} Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.362793 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mg7xq\" (UniqueName: \"kubernetes.io/projected/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-kube-api-access-mg7xq\") pod \"certified-operators-k9xnv\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.404127 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d9qfn"] Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.420592 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k9xnv"] Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.598218 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.622797 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz8gh\" (UniqueName: \"kubernetes.io/projected/88c0c83d-a22b-4150-9572-ee68fb5f1e81-kube-api-access-xz8gh\") pod \"certified-operators-p5std\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.624083 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p5std" Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.644521 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45hql\" (UniqueName: \"kubernetes.io/projected/c59d1347-a48d-4337-a8d1-2e5bef1f4535-kube-api-access-45hql\") pod \"community-operators-d9qfn\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.836230 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:50 crc kubenswrapper[4925]: I0121 10:58:50.836652 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d9qfn" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.973658 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5bk"] Jan 21 10:58:51 crc kubenswrapper[4925]: E0121 10:58:50.974095 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee785c06-3ec0-4917-a762-a5a8c178b95a" containerName="collect-profiles" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.974268 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee785c06-3ec0-4917-a762-a5a8c178b95a" containerName="collect-profiles" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.974517 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee785c06-3ec0-4917-a762-a5a8c178b95a" containerName="collect-profiles" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.981924 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5kqxm"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.983954 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.984867 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.995072 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee785c06-3ec0-4917-a762-a5a8c178b95a-secret-volume\") pod \"ee785c06-3ec0-4917-a762-a5a8c178b95a\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.995212 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kl55r\" (UniqueName: \"kubernetes.io/projected/ee785c06-3ec0-4917-a762-a5a8c178b95a-kube-api-access-kl55r\") pod \"ee785c06-3ec0-4917-a762-a5a8c178b95a\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:50.995507 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee785c06-3ec0-4917-a762-a5a8c178b95a-config-volume\") pod \"ee785c06-3ec0-4917-a762-a5a8c178b95a\" (UID: \"ee785c06-3ec0-4917-a762-a5a8c178b95a\") " Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.006977 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xq95p"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.007126 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee785c06-3ec0-4917-a762-a5a8c178b95a-config-volume" (OuterVolumeSpecName: "config-volume") pod "ee785c06-3ec0-4917-a762-a5a8c178b95a" (UID: "ee785c06-3ec0-4917-a762-a5a8c178b95a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.008588 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.011928 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qt57g"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.013661 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.020779 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee785c06-3ec0-4917-a762-a5a8c178b95a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ee785c06-3ec0-4917-a762-a5a8c178b95a" (UID: "ee785c06-3ec0-4917-a762-a5a8c178b95a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.023972 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:51 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:51 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:51 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.024555 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.024821 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.024358 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee785c06-3ec0-4917-a762-a5a8c178b95a-kube-api-access-kl55r" (OuterVolumeSpecName: "kube-api-access-kl55r") pod "ee785c06-3ec0-4917-a762-a5a8c178b95a" (UID: "ee785c06-3ec0-4917-a762-a5a8c178b95a"). InnerVolumeSpecName "kube-api-access-kl55r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.036199 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.078367 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-qrrl6" podStartSLOduration=39.078344082 podStartE2EDuration="39.078344082s" podCreationTimestamp="2026-01-21 10:58:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:51.061931127 +0000 UTC m=+222.665823061" watchObservedRunningTime="2026-01-21 10:58:51.078344082 +0000 UTC m=+222.682236016" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.079613 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5bk"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102275 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-utilities\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102346 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-utilities\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102380 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6b8h\" (UniqueName: 
\"kubernetes.io/projected/e4de47a6-b14d-4651-8568-49845b60ee7e-kube-api-access-f6b8h\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102441 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-catalog-content\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102550 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx98h\" (UniqueName: \"kubernetes.io/projected/f6d949bc-f771-4100-8afa-ff89f3da97d7-kube-api-access-sx98h\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102579 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-catalog-content\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102619 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-catalog-content\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102656 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-catalog-content\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102698 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqj7h\" (UniqueName: \"kubernetes.io/projected/3afd79f3-5455-427f-a278-62309cd643ec-kube-api-access-gqj7h\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102727 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-utilities\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102761 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-utilities\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc 
kubenswrapper[4925]: I0121 10:58:51.102788 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9csw\" (UniqueName: \"kubernetes.io/projected/758a7d1b-c327-42ee-a585-efa49ec90d5e-kube-api-access-f9csw\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102844 4925 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee785c06-3ec0-4917-a762-a5a8c178b95a-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102864 4925 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee785c06-3ec0-4917-a762-a5a8c178b95a-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.102879 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kl55r\" (UniqueName: \"kubernetes.io/projected/ee785c06-3ec0-4917-a762-a5a8c178b95a-kube-api-access-kl55r\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.129514 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xq95p"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.160491 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5kqxm"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.236429 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9csw\" (UniqueName: \"kubernetes.io/projected/758a7d1b-c327-42ee-a585-efa49ec90d5e-kube-api-access-f9csw\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.236511 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-utilities\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.236546 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-utilities\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.236575 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6b8h\" (UniqueName: \"kubernetes.io/projected/e4de47a6-b14d-4651-8568-49845b60ee7e-kube-api-access-f6b8h\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.236600 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-catalog-content\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc 
kubenswrapper[4925]: I0121 10:58:51.236864 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx98h\" (UniqueName: \"kubernetes.io/projected/f6d949bc-f771-4100-8afa-ff89f3da97d7-kube-api-access-sx98h\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.236917 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-catalog-content\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.236986 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-catalog-content\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.237024 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-catalog-content\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.237063 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqj7h\" (UniqueName: \"kubernetes.io/projected/3afd79f3-5455-427f-a278-62309cd643ec-kube-api-access-gqj7h\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.237125 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-utilities\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.237181 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-utilities\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.239132 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-utilities\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.244268 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-catalog-content\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 
10:58:51.244939 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-utilities\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.245070 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-catalog-content\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.246515 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-utilities\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.247184 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-utilities\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.248428 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-catalog-content\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.259727 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-catalog-content\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.365029 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqj7h\" (UniqueName: \"kubernetes.io/projected/3afd79f3-5455-427f-a278-62309cd643ec-kube-api-access-gqj7h\") pod \"redhat-operators-qt57g\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.461566 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" event={"ID":"ee785c06-3ec0-4917-a762-a5a8c178b95a","Type":"ContainerDied","Data":"16b8eb5f7a3626fc7516b9e60dd71e21f35b57dbb8897df704e481ea5643a097"} Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.479571 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16b8eb5f7a3626fc7516b9e60dd71e21f35b57dbb8897df704e481ea5643a097" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.476843 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6b8h\" (UniqueName: \"kubernetes.io/projected/e4de47a6-b14d-4651-8568-49845b60ee7e-kube-api-access-f6b8h\") pod \"redhat-marketplace-xq95p\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " 
pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.466724 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.549896 4925 generic.go:334] "Generic (PLEG): container finished" podID="da6cdc16-92f1-4475-99f2-c087b77d76cf" containerID="00c07a5be7f901effc1f4d1eef550e4e0495263770ac9ea8b9ee4477c338fa20" exitCode=0 Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.624011 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.796006 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"da6cdc16-92f1-4475-99f2-c087b77d76cf","Type":"ContainerDied","Data":"00c07a5be7f901effc1f4d1eef550e4e0495263770ac9ea8b9ee4477c338fa20"} Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.665989 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.818116 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.885691 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qt57g"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.886259 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-m7dl4"] Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.892751 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:51 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:51 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:51 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.892842 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.913278 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx98h\" (UniqueName: \"kubernetes.io/projected/f6d949bc-f771-4100-8afa-ff89f3da97d7-kube-api-access-sx98h\") pod \"redhat-marketplace-4w5bk\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.934548 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9csw\" (UniqueName: \"kubernetes.io/projected/758a7d1b-c327-42ee-a585-efa49ec90d5e-kube-api-access-f9csw\") pod \"redhat-operators-5kqxm\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:51 crc kubenswrapper[4925]: I0121 10:58:51.964095 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 10:58:52 crc kubenswrapper[4925]: I0121 10:58:52.021485 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 10:58:52 crc kubenswrapper[4925]: I0121 10:58:52.673106 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x5pnh"] Jan 21 10:58:52 crc kubenswrapper[4925]: I0121 10:58:52.682480 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" event={"ID":"0770d392-cbe7-4049-aa81-46d3892bc4a9","Type":"ContainerStarted","Data":"04bbc8544d504fd9a331192d92ca8ab5db1ff8c08af71035057172db10f28a16"} Jan 21 10:58:52 crc kubenswrapper[4925]: I0121 10:58:52.741987 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"334a23ee-2f20-4067-92a6-ff134cbd5bf2","Type":"ContainerStarted","Data":"4679fa799df9938c04f39e8cbecb569d899632542182ff16e85ae39426ddaec3"} Jan 21 10:58:52 crc kubenswrapper[4925]: I0121 10:58:52.848821 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p5std"] Jan 21 10:58:52 crc kubenswrapper[4925]: W0121 10:58:52.922269 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod970344f4_64f6_4ffc_9896_6dd169ca1553.slice/crio-91d710456a200dd01636e65640434f883794bbb59a0ad135f9417e16807457bb WatchSource:0}: Error finding container 91d710456a200dd01636e65640434f883794bbb59a0ad135f9417e16807457bb: Status 404 returned error can't find the container with id 91d710456a200dd01636e65640434f883794bbb59a0ad135f9417e16807457bb Jan 21 10:58:52 crc kubenswrapper[4925]: I0121 10:58:52.958517 4925 patch_prober.go:28] interesting pod/apiserver-76f77b778f-pxkk7 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]log ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]etcd ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/generic-apiserver-start-informers ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/max-in-flight-filter ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 21 10:58:52 crc kubenswrapper[4925]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 21 10:58:52 crc kubenswrapper[4925]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/project.openshift.io-projectcache ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-startinformers ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 21 10:58:52 crc kubenswrapper[4925]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 21 10:58:52 crc kubenswrapper[4925]: livez check failed Jan 21 10:58:52 crc kubenswrapper[4925]: I0121 10:58:52.958621 4925 
prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" podUID="e0376dda-f02a-464e-ae41-18d6fddd7097" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:53 crc kubenswrapper[4925]: I0121 10:58:53.687146 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:53 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:53 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:53 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:53 crc kubenswrapper[4925]: I0121 10:58:53.687797 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:53 crc kubenswrapper[4925]: I0121 10:58:53.866340 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5std" event={"ID":"88c0c83d-a22b-4150-9572-ee68fb5f1e81","Type":"ContainerStarted","Data":"dc2ed219a7a86aff66e395787fa688a802fec68ad690222fdd6262af5df03e65"} Jan 21 10:58:53 crc kubenswrapper[4925]: I0121 10:58:53.888382 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:53 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:53 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:53 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:53 crc kubenswrapper[4925]: I0121 10:58:53.889226 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:53 crc kubenswrapper[4925]: I0121 10:58:53.914645 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x5pnh" event={"ID":"970344f4-64f6-4ffc-9896-6dd169ca1553","Type":"ContainerStarted","Data":"91d710456a200dd01636e65640434f883794bbb59a0ad135f9417e16807457bb"} Jan 21 10:58:53 crc kubenswrapper[4925]: I0121 10:58:53.928745 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k9xnv"] Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.068801 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.068893 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.068905 4925 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.068981 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.069060 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.070275 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"bde04c60608718c197f551117feaef2b20fbbd8bb179be6bbac5f80533954ae9"} pod="openshift-console/downloads-7954f5f757-vw8cb" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.070534 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" containerID="cri-o://bde04c60608718c197f551117feaef2b20fbbd8bb179be6bbac5f80533954ae9" gracePeriod=2 Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.068813 4925 patch_prober.go:28] interesting pod/console-f9d7485db-7lrsj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.070741 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7lrsj" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.091554 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.091621 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.134555 4925 patch_prober.go:28] interesting pod/apiserver-76f77b778f-pxkk7 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]log ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]etcd ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/generic-apiserver-start-informers 
ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/max-in-flight-filter ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 21 10:58:54 crc kubenswrapper[4925]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/project.openshift.io-projectcache ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-startinformers ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 21 10:58:54 crc kubenswrapper[4925]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 21 10:58:54 crc kubenswrapper[4925]: livez check failed Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.134646 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" podUID="e0376dda-f02a-464e-ae41-18d6fddd7097" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.366851 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d9qfn"] Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.411114 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 10:58:54 crc kubenswrapper[4925]: W0121 10:58:54.676775 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc59d1347_a48d_4337_a8d1_2e5bef1f4535.slice/crio-2c636e6bc4477ff1104784e408bb291fbf10d106e95dd2d8085f9b0ccb71cf70 WatchSource:0}: Error finding container 2c636e6bc4477ff1104784e408bb291fbf10d106e95dd2d8085f9b0ccb71cf70: Status 404 returned error can't find the container with id 2c636e6bc4477ff1104784e408bb291fbf10d106e95dd2d8085f9b0ccb71cf70 Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.934319 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:54 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:54 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:54 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:54 crc kubenswrapper[4925]: I0121 10:58:54.934548 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:55 crc kubenswrapper[4925]: E0121 10:58:55.070618 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod890e3b6e_bd8d_438c_992b_508bb751bdca.slice/crio-bde04c60608718c197f551117feaef2b20fbbd8bb179be6bbac5f80533954ae9.scope\": RecentStats: unable to find data in memory cache]" Jan 21 
10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.071542 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9xnv" event={"ID":"4c65dfb5-99b9-4899-9a86-b9e05194e9a4","Type":"ContainerStarted","Data":"2a279298cb460e86cfcb2eece5fd741bff561541dda8234c6406e1524c336761"} Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.087500 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xq95p"] Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.101843 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" event={"ID":"0770d392-cbe7-4049-aa81-46d3892bc4a9","Type":"ContainerStarted","Data":"1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf"} Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.103463 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.117338 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5std" event={"ID":"88c0c83d-a22b-4150-9572-ee68fb5f1e81","Type":"ContainerStarted","Data":"0400ff4f00e12f3d471ed1ada23e8de1c582adedd143632df981968299002603"} Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.147658 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92"] Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.148070 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" podUID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" containerName="route-controller-manager" containerID="cri-o://9edc1e2a18a0c9ea071b1b67200624b28a0ea24fceb803b9aeb60cfc92ac9908" gracePeriod=30 Jan 21 10:58:55 crc kubenswrapper[4925]: W0121 10:58:55.163713 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4de47a6_b14d_4651_8568_49845b60ee7e.slice/crio-cd53f36b196321d5416ce99b634fc8f51b3a64f0ca259cee2905f112c4236ac0 WatchSource:0}: Error finding container cd53f36b196321d5416ce99b634fc8f51b3a64f0ca259cee2905f112c4236ac0: Status 404 returned error can't find the container with id cd53f36b196321d5416ce99b634fc8f51b3a64f0ca259cee2905f112c4236ac0 Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.165474 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.169980 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"334a23ee-2f20-4067-92a6-ff134cbd5bf2","Type":"ContainerStarted","Data":"f7d46130fc2605334547081536e389633077925d7e00bcbfea93090cd0fbb598"} Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.186075 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qt57g"] Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.221258 4925 generic.go:334] "Generic (PLEG): container finished" podID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerID="bde04c60608718c197f551117feaef2b20fbbd8bb179be6bbac5f80533954ae9" exitCode=0 Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.221554 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-console/downloads-7954f5f757-vw8cb" event={"ID":"890e3b6e-bd8d-438c-992b-508bb751bdca","Type":"ContainerDied","Data":"bde04c60608718c197f551117feaef2b20fbbd8bb179be6bbac5f80533954ae9"} Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.307940 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d9qfn" event={"ID":"c59d1347-a48d-4337-a8d1-2e5bef1f4535","Type":"ContainerStarted","Data":"2c636e6bc4477ff1104784e408bb291fbf10d106e95dd2d8085f9b0ccb71cf70"} Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.327048 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" podStartSLOduration=188.32701732 podStartE2EDuration="3m8.32701732s" podCreationTimestamp="2026-01-21 10:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:55.326154372 +0000 UTC m=+226.930046316" watchObservedRunningTime="2026-01-21 10:58:55.32701732 +0000 UTC m=+226.930909254" Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.393292 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.460524 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=10.460492659 podStartE2EDuration="10.460492659s" podCreationTimestamp="2026-01-21 10:58:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:58:55.456937581 +0000 UTC m=+227.060829525" watchObservedRunningTime="2026-01-21 10:58:55.460492659 +0000 UTC m=+227.064384593" Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.504190 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da6cdc16-92f1-4475-99f2-c087b77d76cf-kube-api-access\") pod \"da6cdc16-92f1-4475-99f2-c087b77d76cf\" (UID: \"da6cdc16-92f1-4475-99f2-c087b77d76cf\") " Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.504497 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da6cdc16-92f1-4475-99f2-c087b77d76cf-kubelet-dir\") pod \"da6cdc16-92f1-4475-99f2-c087b77d76cf\" (UID: \"da6cdc16-92f1-4475-99f2-c087b77d76cf\") " Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.505173 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da6cdc16-92f1-4475-99f2-c087b77d76cf-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "da6cdc16-92f1-4475-99f2-c087b77d76cf" (UID: "da6cdc16-92f1-4475-99f2-c087b77d76cf"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.513434 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da6cdc16-92f1-4475-99f2-c087b77d76cf-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "da6cdc16-92f1-4475-99f2-c087b77d76cf" (UID: "da6cdc16-92f1-4475-99f2-c087b77d76cf"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.562237 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5kqxm"] Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.606873 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da6cdc16-92f1-4475-99f2-c087b77d76cf-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.606933 4925 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da6cdc16-92f1-4475-99f2-c087b77d76cf-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.662806 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5bk"] Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.885736 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:55 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:55 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:55 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:55 crc kubenswrapper[4925]: I0121 10:58:55.885851 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.421732 4925 generic.go:334] "Generic (PLEG): container finished" podID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerID="2bf163e510f0cfb687e17b13a7e2ef82f3047a4501fa7acc8fd440566cf3f6fd" exitCode=0 Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.423172 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9xnv" event={"ID":"4c65dfb5-99b9-4899-9a86-b9e05194e9a4","Type":"ContainerDied","Data":"2bf163e510f0cfb687e17b13a7e2ef82f3047a4501fa7acc8fd440566cf3f6fd"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.432960 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.436481 4925 generic.go:334] "Generic (PLEG): container finished" podID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerID="3b6fcb43863d0e64c50aaf0ab48a15d1c7203266b4cf5f7dd929eb6431949616" exitCode=0 Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.437631 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x5pnh" event={"ID":"970344f4-64f6-4ffc-9896-6dd169ca1553","Type":"ContainerDied","Data":"3b6fcb43863d0e64c50aaf0ab48a15d1c7203266b4cf5f7dd929eb6431949616"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.446493 4925 generic.go:334] "Generic (PLEG): container finished" podID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" containerID="9edc1e2a18a0c9ea071b1b67200624b28a0ea24fceb803b9aeb60cfc92ac9908" exitCode=0 Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.446585 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" event={"ID":"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1","Type":"ContainerDied","Data":"9edc1e2a18a0c9ea071b1b67200624b28a0ea24fceb803b9aeb60cfc92ac9908"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.446641 4925 scope.go:117] "RemoveContainer" containerID="9edc1e2a18a0c9ea071b1b67200624b28a0ea24fceb803b9aeb60cfc92ac9908" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.446676 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.462113 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xq95p" event={"ID":"e4de47a6-b14d-4651-8568-49845b60ee7e","Type":"ContainerStarted","Data":"cd53f36b196321d5416ce99b634fc8f51b3a64f0ca259cee2905f112c4236ac0"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.516917 4925 generic.go:334] "Generic (PLEG): container finished" podID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerID="ee70e1ce9a091dc45869d21c6c727e4145bafd02e3b093d9e8d684d28fb4b05b" exitCode=0 Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.517119 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d9qfn" event={"ID":"c59d1347-a48d-4337-a8d1-2e5bef1f4535","Type":"ContainerDied","Data":"ee70e1ce9a091dc45869d21c6c727e4145bafd02e3b093d9e8d684d28fb4b05b"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.549929 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-client-ca\") pod \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.550040 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-config\") pod \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.550118 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-serving-cert\") pod 
\"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.550187 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wz4nd\" (UniqueName: \"kubernetes.io/projected/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-kube-api-access-wz4nd\") pod \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\" (UID: \"f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1\") " Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.573625 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kqxm" event={"ID":"758a7d1b-c327-42ee-a585-efa49ec90d5e","Type":"ContainerStarted","Data":"691a901442f536659c6b46bf664301ab71e3e3501f7571a84fe19db65d78fda7"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.602522 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-config" (OuterVolumeSpecName: "config") pod "f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" (UID: "f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.662498 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-client-ca" (OuterVolumeSpecName: "client-ca") pod "f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" (UID: "f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.675304 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.675573 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"da6cdc16-92f1-4475-99f2-c087b77d76cf","Type":"ContainerDied","Data":"9cc568cc463b0ae72f04fdc9281c73c300fbe4bee8f5eebe0206dc2a76d6c0e1"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.675635 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9cc568cc463b0ae72f04fdc9281c73c300fbe4bee8f5eebe0206dc2a76d6c0e1" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.697307 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-kube-api-access-wz4nd" (OuterVolumeSpecName: "kube-api-access-wz4nd") pod "f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" (UID: "f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1"). InnerVolumeSpecName "kube-api-access-wz4nd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.698339 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" (UID: "f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.699132 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5bk" event={"ID":"f6d949bc-f771-4100-8afa-ff89f3da97d7","Type":"ContainerStarted","Data":"0fb9180355bdc02c44cf35bed9bfc48642ca7a4086b7af89dafb21ad84f1a897"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.714609 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.714684 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wz4nd\" (UniqueName: \"kubernetes.io/projected/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-kube-api-access-wz4nd\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.714707 4925 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.714719 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.762845 4925 generic.go:334] "Generic (PLEG): container finished" podID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerID="0400ff4f00e12f3d471ed1ada23e8de1c582adedd143632df981968299002603" exitCode=0 Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.763101 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5std" event={"ID":"88c0c83d-a22b-4150-9572-ee68fb5f1e81","Type":"ContainerDied","Data":"0400ff4f00e12f3d471ed1ada23e8de1c582adedd143632df981968299002603"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.771318 4925 generic.go:334] "Generic (PLEG): container finished" podID="3afd79f3-5455-427f-a278-62309cd643ec" containerID="b41f26a985e7845cb9ab9cd567ae885660102b037a4083bae748fcef9a70262b" exitCode=0 Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.771450 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qt57g" event={"ID":"3afd79f3-5455-427f-a278-62309cd643ec","Type":"ContainerDied","Data":"b41f26a985e7845cb9ab9cd567ae885660102b037a4083bae748fcef9a70262b"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.771496 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qt57g" event={"ID":"3afd79f3-5455-427f-a278-62309cd643ec","Type":"ContainerStarted","Data":"8cb8b63d283f465b8988374ab0c8fd1eeb2243e0517fb739add08f9c99f90c21"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.816904 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vw8cb" event={"ID":"890e3b6e-bd8d-438c-992b-508bb751bdca","Type":"ContainerStarted","Data":"d02d30a2c5b9a29447aa853d2f8b668e61f66393bc67c40778bd298dfa8f1995"} Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.818186 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.822409 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb 
container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.822486 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.874513 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:56 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:56 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:56 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.874603 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.928795 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dz6wr"] Jan 21 10:58:56 crc kubenswrapper[4925]: I0121 10:58:56.929081 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" podUID="8f1540bb-bd69-4f44-ac02-8da0575056e1" containerName="controller-manager" containerID="cri-o://93d15aad339a63927328769ce895c0e16bd3af9dddd9e8a4aa52b91a69588839" gracePeriod=30 Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.064292 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc"] Jan 21 10:58:57 crc kubenswrapper[4925]: E0121 10:58:57.064988 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da6cdc16-92f1-4475-99f2-c087b77d76cf" containerName="pruner" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.065238 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="da6cdc16-92f1-4475-99f2-c087b77d76cf" containerName="pruner" Jan 21 10:58:57 crc kubenswrapper[4925]: E0121 10:58:57.065464 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" containerName="route-controller-manager" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.065600 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" containerName="route-controller-manager" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.065989 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" containerName="route-controller-manager" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.066534 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="da6cdc16-92f1-4475-99f2-c087b77d76cf" containerName="pruner" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.067956 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.078857 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.078942 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.078876 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.079253 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.079956 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.081078 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.114312 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc"] Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.124966 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92"] Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.125842 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f8b7194-c07f-469d-b203-f7106a54fa0f-serving-cert\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.125934 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jfng\" (UniqueName: \"kubernetes.io/projected/1f8b7194-c07f-469d-b203-f7106a54fa0f-kube-api-access-6jfng\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.126035 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-config\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.126070 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-client-ca\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.131505 4925 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-nrk92"] Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.228236 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-config\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.228914 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-client-ca\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.229180 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f8b7194-c07f-469d-b203-f7106a54fa0f-serving-cert\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.229353 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jfng\" (UniqueName: \"kubernetes.io/projected/1f8b7194-c07f-469d-b203-f7106a54fa0f-kube-api-access-6jfng\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.232163 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-config\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.236943 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-client-ca\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.248879 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f8b7194-c07f-469d-b203-f7106a54fa0f-serving-cert\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.257592 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jfng\" (UniqueName: \"kubernetes.io/projected/1f8b7194-c07f-469d-b203-f7106a54fa0f-kube-api-access-6jfng\") pod \"route-controller-manager-6465467578-m6lzc\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " 
pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.437119 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.520130 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1" path="/var/lib/kubelet/pods/f3c55b7a-d6a2-4e49-96c3-e1d47689e7c1/volumes" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.859426 4925 generic.go:334] "Generic (PLEG): container finished" podID="8f1540bb-bd69-4f44-ac02-8da0575056e1" containerID="93d15aad339a63927328769ce895c0e16bd3af9dddd9e8a4aa52b91a69588839" exitCode=0 Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.859598 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" event={"ID":"8f1540bb-bd69-4f44-ac02-8da0575056e1","Type":"ContainerDied","Data":"93d15aad339a63927328769ce895c0e16bd3af9dddd9e8a4aa52b91a69588839"} Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.864158 4925 generic.go:334] "Generic (PLEG): container finished" podID="334a23ee-2f20-4067-92a6-ff134cbd5bf2" containerID="f7d46130fc2605334547081536e389633077925d7e00bcbfea93090cd0fbb598" exitCode=0 Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.864235 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"334a23ee-2f20-4067-92a6-ff134cbd5bf2","Type":"ContainerDied","Data":"f7d46130fc2605334547081536e389633077925d7e00bcbfea93090cd0fbb598"} Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.888894 4925 generic.go:334] "Generic (PLEG): container finished" podID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerID="48b8d61046399f6c90694e5f036dd2aa5506bc62234cde66dacccfc84034744e" exitCode=0 Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.889119 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kqxm" event={"ID":"758a7d1b-c327-42ee-a585-efa49ec90d5e","Type":"ContainerDied","Data":"48b8d61046399f6c90694e5f036dd2aa5506bc62234cde66dacccfc84034744e"} Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.889627 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:57 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:57 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:57 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.889737 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.900740 4925 generic.go:334] "Generic (PLEG): container finished" podID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerID="4d7f83e0fb63c60edcd5c14b60f38f9f558e2a90165e87a3d1a82bbfa24aa6e7" exitCode=0 Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.900840 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xq95p" 
event={"ID":"e4de47a6-b14d-4651-8568-49845b60ee7e","Type":"ContainerDied","Data":"4d7f83e0fb63c60edcd5c14b60f38f9f558e2a90165e87a3d1a82bbfa24aa6e7"} Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.904840 4925 generic.go:334] "Generic (PLEG): container finished" podID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerID="b9c3cc25c302118378be1508c24e8b928bf3a15a3c47f1e7a17185bc397809b9" exitCode=0 Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.904922 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5bk" event={"ID":"f6d949bc-f771-4100-8afa-ff89f3da97d7","Type":"ContainerDied","Data":"b9c3cc25c302118378be1508c24e8b928bf3a15a3c47f1e7a17185bc397809b9"} Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.906342 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:58:57 crc kubenswrapper[4925]: I0121 10:58:57.906419 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:58:58 crc kubenswrapper[4925]: I0121 10:58:58.362111 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc"] Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.551145 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:59 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:59 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:59 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.551888 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.597333 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.606645 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-pxkk7" Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.687746 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc"] Jan 21 10:58:59 crc kubenswrapper[4925]: W0121 10:58:59.770387 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f8b7194_c07f_469d_b203_f7106a54fa0f.slice/crio-1f0b8fb80dd4d111b3a6ef46e9af514a84743afde44b78f34fe5f74e560890d6 WatchSource:0}: Error finding container 1f0b8fb80dd4d111b3a6ef46e9af514a84743afde44b78f34fe5f74e560890d6: Status 404 returned error can't find the container with id 
1f0b8fb80dd4d111b3a6ef46e9af514a84743afde44b78f34fe5f74e560890d6 Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.845332 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.877352 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:58:59 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:58:59 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:58:59 crc kubenswrapper[4925]: healthz check failed Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.877583 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.928093 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-proxy-ca-bundles\") pod \"8f1540bb-bd69-4f44-ac02-8da0575056e1\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.928203 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-client-ca\") pod \"8f1540bb-bd69-4f44-ac02-8da0575056e1\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.928267 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1540bb-bd69-4f44-ac02-8da0575056e1-serving-cert\") pod \"8f1540bb-bd69-4f44-ac02-8da0575056e1\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.928311 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4np7\" (UniqueName: \"kubernetes.io/projected/8f1540bb-bd69-4f44-ac02-8da0575056e1-kube-api-access-j4np7\") pod \"8f1540bb-bd69-4f44-ac02-8da0575056e1\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.928470 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-config\") pod \"8f1540bb-bd69-4f44-ac02-8da0575056e1\" (UID: \"8f1540bb-bd69-4f44-ac02-8da0575056e1\") " Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.931003 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-config" (OuterVolumeSpecName: "config") pod "8f1540bb-bd69-4f44-ac02-8da0575056e1" (UID: "8f1540bb-bd69-4f44-ac02-8da0575056e1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.930990 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-client-ca" (OuterVolumeSpecName: "client-ca") pod "8f1540bb-bd69-4f44-ac02-8da0575056e1" (UID: "8f1540bb-bd69-4f44-ac02-8da0575056e1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:58:59 crc kubenswrapper[4925]: I0121 10:58:59.932053 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8f1540bb-bd69-4f44-ac02-8da0575056e1" (UID: "8f1540bb-bd69-4f44-ac02-8da0575056e1"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.031037 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.031097 4925 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.031111 4925 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1540bb-bd69-4f44-ac02-8da0575056e1-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.042276 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f1540bb-bd69-4f44-ac02-8da0575056e1-kube-api-access-j4np7" (OuterVolumeSpecName: "kube-api-access-j4np7") pod "8f1540bb-bd69-4f44-ac02-8da0575056e1" (UID: "8f1540bb-bd69-4f44-ac02-8da0575056e1"). InnerVolumeSpecName "kube-api-access-j4np7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.043246 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f1540bb-bd69-4f44-ac02-8da0575056e1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8f1540bb-bd69-4f44-ac02-8da0575056e1" (UID: "8f1540bb-bd69-4f44-ac02-8da0575056e1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.082342 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx"] Jan 21 10:59:00 crc kubenswrapper[4925]: E0121 10:59:00.082796 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f1540bb-bd69-4f44-ac02-8da0575056e1" containerName="controller-manager" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.082827 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f1540bb-bd69-4f44-ac02-8da0575056e1" containerName="controller-manager" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.082960 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f1540bb-bd69-4f44-ac02-8da0575056e1" containerName="controller-manager" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.084081 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.116305 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx"] Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.133968 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-config\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.134049 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mc6jf\" (UniqueName: \"kubernetes.io/projected/4374182f-5a91-416c-a25c-c20b66d4fb68-kube-api-access-mc6jf\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.134100 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4374182f-5a91-416c-a25c-c20b66d4fb68-serving-cert\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.134195 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-client-ca\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.134230 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-proxy-ca-bundles\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.134350 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1540bb-bd69-4f44-ac02-8da0575056e1-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.134368 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4np7\" (UniqueName: \"kubernetes.io/projected/8f1540bb-bd69-4f44-ac02-8da0575056e1-kube-api-access-j4np7\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.242862 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-client-ca\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.242974 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-proxy-ca-bundles\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.243045 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-config\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.243074 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mc6jf\" (UniqueName: \"kubernetes.io/projected/4374182f-5a91-416c-a25c-c20b66d4fb68-kube-api-access-mc6jf\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.243123 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4374182f-5a91-416c-a25c-c20b66d4fb68-serving-cert\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.247728 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-proxy-ca-bundles\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.255232 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-client-ca\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.258498 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-config\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.261009 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4374182f-5a91-416c-a25c-c20b66d4fb68-serving-cert\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.284854 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mc6jf\" (UniqueName: \"kubernetes.io/projected/4374182f-5a91-416c-a25c-c20b66d4fb68-kube-api-access-mc6jf\") pod \"controller-manager-5d84fbf44f-2dtzx\" (UID: 
\"4374182f-5a91-416c-a25c-c20b66d4fb68\") " pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.421928 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.503362 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.612369 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kubelet-dir\") pod \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\" (UID: \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\") " Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.613128 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kube-api-access\") pod \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\" (UID: \"334a23ee-2f20-4067-92a6-ff134cbd5bf2\") " Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.614088 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "334a23ee-2f20-4067-92a6-ff134cbd5bf2" (UID: "334a23ee-2f20-4067-92a6-ff134cbd5bf2"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.715968 4925 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.823088 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "334a23ee-2f20-4067-92a6-ff134cbd5bf2" (UID: "334a23ee-2f20-4067-92a6-ff134cbd5bf2"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.852622 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" event={"ID":"1f8b7194-c07f-469d-b203-f7106a54fa0f","Type":"ContainerStarted","Data":"1f0b8fb80dd4d111b3a6ef46e9af514a84743afde44b78f34fe5f74e560890d6"} Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.875954 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:00 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:00 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:00 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.876185 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.878811 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" event={"ID":"8f1540bb-bd69-4f44-ac02-8da0575056e1","Type":"ContainerDied","Data":"ef5d2eee6c13c1ab928d64110e81b48ac01799bec489b1d52f703dbc65d2e399"} Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.878907 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dz6wr" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.878924 4925 scope.go:117] "RemoveContainer" containerID="93d15aad339a63927328769ce895c0e16bd3af9dddd9e8a4aa52b91a69588839" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.888290 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"334a23ee-2f20-4067-92a6-ff134cbd5bf2","Type":"ContainerDied","Data":"4679fa799df9938c04f39e8cbecb569d899632542182ff16e85ae39426ddaec3"} Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.888369 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.888369 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4679fa799df9938c04f39e8cbecb569d899632542182ff16e85ae39426ddaec3" Jan 21 10:59:00 crc kubenswrapper[4925]: I0121 10:59:00.980769 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334a23ee-2f20-4067-92a6-ff134cbd5bf2-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:01 crc kubenswrapper[4925]: I0121 10:59:01.163663 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dz6wr"] Jan 21 10:59:01 crc kubenswrapper[4925]: I0121 10:59:01.177950 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dz6wr"] Jan 21 10:59:01 crc kubenswrapper[4925]: I0121 10:59:01.460513 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx"] Jan 21 10:59:01 crc kubenswrapper[4925]: I0121 10:59:01.792869 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f1540bb-bd69-4f44-ac02-8da0575056e1" path="/var/lib/kubelet/pods/8f1540bb-bd69-4f44-ac02-8da0575056e1/volumes" Jan 21 10:59:01 crc kubenswrapper[4925]: I0121 10:59:01.880679 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:01 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:01 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:01 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:01 crc kubenswrapper[4925]: I0121 10:59:01.880806 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:02 crc kubenswrapper[4925]: I0121 10:59:02.669921 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" event={"ID":"4374182f-5a91-416c-a25c-c20b66d4fb68","Type":"ContainerStarted","Data":"3a733b47255c64902ad96d63efe576a56a99740c8d193a9783bd97854eb71147"} Jan 21 10:59:02 crc kubenswrapper[4925]: I0121 10:59:02.897030 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:02 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:02 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:02 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:02 crc kubenswrapper[4925]: I0121 10:59:02.897760 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.700977 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" event={"ID":"1f8b7194-c07f-469d-b203-f7106a54fa0f","Type":"ContainerStarted","Data":"62db10468353323aa2852b8cf30878b4c4fa2c342fcdb9f8d46d3af35330a207"} Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.701189 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" podUID="1f8b7194-c07f-469d-b203-f7106a54fa0f" containerName="route-controller-manager" containerID="cri-o://62db10468353323aa2852b8cf30878b4c4fa2c342fcdb9f8d46d3af35330a207" gracePeriod=30 Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.701315 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.743182 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" event={"ID":"4374182f-5a91-416c-a25c-c20b66d4fb68","Type":"ContainerStarted","Data":"f3adec58f0c35fff173da56a96d0ee34c5238d306c503ff9c5742bfeeab48478"} Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.746259 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.748143 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" podStartSLOduration=6.748108887 podStartE2EDuration="6.748108887s" podCreationTimestamp="2026-01-21 10:58:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:59:03.73676441 +0000 UTC m=+235.340656374" watchObservedRunningTime="2026-01-21 10:59:03.748108887 +0000 UTC m=+235.352000821" Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.820387 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.824688 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.846142 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" podStartSLOduration=5.845857049 podStartE2EDuration="5.845857049s" podCreationTimestamp="2026-01-21 10:58:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:59:03.831789732 +0000 UTC m=+235.435681666" watchObservedRunningTime="2026-01-21 10:59:03.845857049 +0000 UTC m=+235.449748983" Jan 21 10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.912347 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:03 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:03 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:03 crc kubenswrapper[4925]: healthz check failed Jan 21 
10:59:03 crc kubenswrapper[4925]: I0121 10:59:03.912469 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.061901 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.062020 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.062481 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.062630 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.062925 4925 patch_prober.go:28] interesting pod/console-f9d7485db-7lrsj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.062965 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7lrsj" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.813235 4925 generic.go:334] "Generic (PLEG): container finished" podID="1f8b7194-c07f-469d-b203-f7106a54fa0f" containerID="62db10468353323aa2852b8cf30878b4c4fa2c342fcdb9f8d46d3af35330a207" exitCode=0 Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.814770 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" event={"ID":"1f8b7194-c07f-469d-b203-f7106a54fa0f","Type":"ContainerDied","Data":"62db10468353323aa2852b8cf30878b4c4fa2c342fcdb9f8d46d3af35330a207"} Jan 21 10:59:04 crc kubenswrapper[4925]: I0121 10:59:04.933031 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:04 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:04 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:04 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:04 crc kubenswrapper[4925]: 
I0121 10:59:04.933196 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.640803 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.786614 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f8b7194-c07f-469d-b203-f7106a54fa0f-serving-cert\") pod \"1f8b7194-c07f-469d-b203-f7106a54fa0f\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.786701 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jfng\" (UniqueName: \"kubernetes.io/projected/1f8b7194-c07f-469d-b203-f7106a54fa0f-kube-api-access-6jfng\") pod \"1f8b7194-c07f-469d-b203-f7106a54fa0f\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.786791 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-client-ca\") pod \"1f8b7194-c07f-469d-b203-f7106a54fa0f\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.786842 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-config\") pod \"1f8b7194-c07f-469d-b203-f7106a54fa0f\" (UID: \"1f8b7194-c07f-469d-b203-f7106a54fa0f\") " Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.788287 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-config" (OuterVolumeSpecName: "config") pod "1f8b7194-c07f-469d-b203-f7106a54fa0f" (UID: "1f8b7194-c07f-469d-b203-f7106a54fa0f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.788370 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-client-ca" (OuterVolumeSpecName: "client-ca") pod "1f8b7194-c07f-469d-b203-f7106a54fa0f" (UID: "1f8b7194-c07f-469d-b203-f7106a54fa0f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.799975 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8b7194-c07f-469d-b203-f7106a54fa0f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1f8b7194-c07f-469d-b203-f7106a54fa0f" (UID: "1f8b7194-c07f-469d-b203-f7106a54fa0f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.800860 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8b7194-c07f-469d-b203-f7106a54fa0f-kube-api-access-6jfng" (OuterVolumeSpecName: "kube-api-access-6jfng") pod "1f8b7194-c07f-469d-b203-f7106a54fa0f" (UID: "1f8b7194-c07f-469d-b203-f7106a54fa0f"). 
InnerVolumeSpecName "kube-api-access-6jfng". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.875283 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:05 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:05 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:05 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.875367 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.889607 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jfng\" (UniqueName: \"kubernetes.io/projected/1f8b7194-c07f-469d-b203-f7106a54fa0f-kube-api-access-6jfng\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.889688 4925 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.889713 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f8b7194-c07f-469d-b203-f7106a54fa0f-config\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:05 crc kubenswrapper[4925]: I0121 10:59:05.889756 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f8b7194-c07f-469d-b203-f7106a54fa0f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.143884 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.162837 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc" event={"ID":"1f8b7194-c07f-469d-b203-f7106a54fa0f","Type":"ContainerDied","Data":"1f0b8fb80dd4d111b3a6ef46e9af514a84743afde44b78f34fe5f74e560890d6"} Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.162937 4925 scope.go:117] "RemoveContainer" containerID="62db10468353323aa2852b8cf30878b4c4fa2c342fcdb9f8d46d3af35330a207" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.243830 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc"] Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.250582 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6465467578-m6lzc"] Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.678935 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk"] Jan 21 10:59:06 crc kubenswrapper[4925]: E0121 10:59:06.679329 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8b7194-c07f-469d-b203-f7106a54fa0f" containerName="route-controller-manager" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.679353 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8b7194-c07f-469d-b203-f7106a54fa0f" containerName="route-controller-manager" Jan 21 10:59:06 crc kubenswrapper[4925]: E0121 10:59:06.679366 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="334a23ee-2f20-4067-92a6-ff134cbd5bf2" containerName="pruner" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.679374 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="334a23ee-2f20-4067-92a6-ff134cbd5bf2" containerName="pruner" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.679534 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f8b7194-c07f-469d-b203-f7106a54fa0f" containerName="route-controller-manager" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.679551 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="334a23ee-2f20-4067-92a6-ff134cbd5bf2" containerName="pruner" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.680008 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.690306 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.690916 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.691114 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.691118 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.693081 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.693514 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.738609 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fvwpn" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.742015 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk"] Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.771503 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-config\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.771605 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-client-ca\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.771705 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f99616a-3317-414e-a865-dc4753aed67a-serving-cert\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.771863 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8kxh\" (UniqueName: \"kubernetes.io/projected/2f99616a-3317-414e-a865-dc4753aed67a-kube-api-access-b8kxh\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.876972 
4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-client-ca\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.877052 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-config\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.877142 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f99616a-3317-414e-a865-dc4753aed67a-serving-cert\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.877243 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8kxh\" (UniqueName: \"kubernetes.io/projected/2f99616a-3317-414e-a865-dc4753aed67a-kube-api-access-b8kxh\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.880806 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-client-ca\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.882320 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-config\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.891761 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f99616a-3317-414e-a865-dc4753aed67a-serving-cert\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.908345 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:06 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:06 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:06 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.908536 4925 prober.go:107] "Probe 
failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:06 crc kubenswrapper[4925]: I0121 10:59:06.963019 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8kxh\" (UniqueName: \"kubernetes.io/projected/2f99616a-3317-414e-a865-dc4753aed67a-kube-api-access-b8kxh\") pod \"route-controller-manager-64655f4f9f-27sjk\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:07 crc kubenswrapper[4925]: I0121 10:59:07.021003 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:07 crc kubenswrapper[4925]: I0121 10:59:07.550637 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f8b7194-c07f-469d-b203-f7106a54fa0f" path="/var/lib/kubelet/pods/1f8b7194-c07f-469d-b203-f7106a54fa0f/volumes" Jan 21 10:59:07 crc kubenswrapper[4925]: I0121 10:59:07.875580 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:07 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:07 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:07 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:07 crc kubenswrapper[4925]: I0121 10:59:07.876014 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:08 crc kubenswrapper[4925]: I0121 10:59:08.534458 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk"] Jan 21 10:59:08 crc kubenswrapper[4925]: I0121 10:59:08.895291 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:08 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:08 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:08 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:08 crc kubenswrapper[4925]: I0121 10:59:08.895387 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:09 crc kubenswrapper[4925]: I0121 10:59:09.216989 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" event={"ID":"2f99616a-3317-414e-a865-dc4753aed67a","Type":"ContainerStarted","Data":"dc66a7989f370e6ef9db018f695ffde67f5fb105b2a4915060f61d617f6deff4"} Jan 21 10:59:09 crc kubenswrapper[4925]: I0121 10:59:09.903600 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe 
status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:09 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:09 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:09 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:09 crc kubenswrapper[4925]: I0121 10:59:09.904189 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:10 crc kubenswrapper[4925]: I0121 10:59:10.896803 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:10 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:10 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:10 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:10 crc kubenswrapper[4925]: I0121 10:59:10.896904 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:11 crc kubenswrapper[4925]: I0121 10:59:11.936978 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:11 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:11 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:11 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:11 crc kubenswrapper[4925]: I0121 10:59:11.937096 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:12 crc kubenswrapper[4925]: I0121 10:59:12.307461 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" event={"ID":"2f99616a-3317-414e-a865-dc4753aed67a","Type":"ContainerStarted","Data":"5e342e4189a05be7c352bf2724df379aeda6d25be0bc3498959fb07440e1bec5"} Jan 21 10:59:12 crc kubenswrapper[4925]: I0121 10:59:12.878201 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:12 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:12 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:12 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:12 crc kubenswrapper[4925]: I0121 10:59:12.878339 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:13 crc kubenswrapper[4925]: I0121 10:59:13.369081 4925 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:13 crc kubenswrapper[4925]: I0121 10:59:13.386441 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 10:59:13 crc kubenswrapper[4925]: I0121 10:59:13.395229 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" podStartSLOduration=15.395188871 podStartE2EDuration="15.395188871s" podCreationTimestamp="2026-01-21 10:58:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 10:59:13.390783246 +0000 UTC m=+244.994675190" watchObservedRunningTime="2026-01-21 10:59:13.395188871 +0000 UTC m=+244.999080805" Jan 21 10:59:13 crc kubenswrapper[4925]: I0121 10:59:13.874880 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:13 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:13 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:13 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:13 crc kubenswrapper[4925]: I0121 10:59:13.875271 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:14 crc kubenswrapper[4925]: I0121 10:59:14.025547 4925 patch_prober.go:28] interesting pod/console-f9d7485db-7lrsj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 21 10:59:14 crc kubenswrapper[4925]: I0121 10:59:14.025663 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7lrsj" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 21 10:59:14 crc kubenswrapper[4925]: I0121 10:59:14.178154 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:14 crc kubenswrapper[4925]: I0121 10:59:14.178240 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:59:14 crc kubenswrapper[4925]: I0121 10:59:14.181190 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:14 crc 
kubenswrapper[4925]: I0121 10:59:14.181248 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:59:14 crc kubenswrapper[4925]: I0121 10:59:14.876761 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:14 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:14 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:14 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:14 crc kubenswrapper[4925]: I0121 10:59:14.876908 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:16 crc kubenswrapper[4925]: I0121 10:59:16.012552 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:16 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:16 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:16 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:16 crc kubenswrapper[4925]: I0121 10:59:16.012716 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:17 crc kubenswrapper[4925]: I0121 10:59:17.058353 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:17 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:17 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:17 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:17 crc kubenswrapper[4925]: I0121 10:59:17.059068 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:17 crc kubenswrapper[4925]: I0121 10:59:17.879565 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:17 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:17 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:17 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:17 crc kubenswrapper[4925]: I0121 10:59:17.879641 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" 
podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:17 crc kubenswrapper[4925]: I0121 10:59:17.888872 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.256848 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:19 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:19 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:19 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.257327 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.496469 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.501026 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.510079 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.512582 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.539038 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.539169 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.549672 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.647408 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.647618 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kubelet-dir\") pod 
\"revision-pruner-9-crc\" (UID: \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.647908 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 10:59:19 crc kubenswrapper[4925]: I0121 10:59:19.718153 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.047779 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.048007 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.049732 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.051108 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.051239 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603" gracePeriod=600 Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.048299 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.049114 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:20 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:20 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:20 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.052280 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.960569 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:20 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:20 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:20 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:20 crc kubenswrapper[4925]: I0121 10:59:20.960842 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:21 crc kubenswrapper[4925]: I0121 10:59:21.594795 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603" exitCode=0 Jan 21 10:59:21 crc kubenswrapper[4925]: I0121 10:59:21.594876 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603"} Jan 21 10:59:21 crc kubenswrapper[4925]: I0121 10:59:21.907735 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:21 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:21 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:21 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:21 crc kubenswrapper[4925]: I0121 10:59:21.907824 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:23 crc kubenswrapper[4925]: I0121 10:59:23.173534 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:23 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 
21 10:59:23 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:23 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:23 crc kubenswrapper[4925]: I0121 10:59:23.173661 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:23 crc kubenswrapper[4925]: I0121 10:59:23.877020 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:23 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:23 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:23 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:23 crc kubenswrapper[4925]: I0121 10:59:23.877145 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.051813 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.051897 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.051964 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.051813 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.052479 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.052779 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.052803 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 
21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.057516 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"d02d30a2c5b9a29447aa853d2f8b668e61f66393bc67c40778bd298dfa8f1995"} pod="openshift-console/downloads-7954f5f757-vw8cb" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.057703 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" containerID="cri-o://d02d30a2c5b9a29447aa853d2f8b668e61f66393bc67c40778bd298dfa8f1995" gracePeriod=2 Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.082740 4925 patch_prober.go:28] interesting pod/console-f9d7485db-7lrsj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.082835 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7lrsj" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.592139 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.595231 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.616425 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.691935 4925 generic.go:334] "Generic (PLEG): container finished" podID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerID="d02d30a2c5b9a29447aa853d2f8b668e61f66393bc67c40778bd298dfa8f1995" exitCode=0 Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.692371 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vw8cb" event={"ID":"890e3b6e-bd8d-438c-992b-508bb751bdca","Type":"ContainerDied","Data":"d02d30a2c5b9a29447aa853d2f8b668e61f66393bc67c40778bd298dfa8f1995"} Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.692575 4925 scope.go:117] "RemoveContainer" containerID="bde04c60608718c197f551117feaef2b20fbbd8bb179be6bbac5f80533954ae9" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.749261 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.749332 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9358efee-87a3-49bf-a75c-a45dc2ac2987-kube-api-access\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: 
I0121 10:59:24.749511 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-var-lock\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.852122 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-var-lock\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.852844 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.852489 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-var-lock\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.852913 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9358efee-87a3-49bf-a75c-a45dc2ac2987-kube-api-access\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.853085 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.876778 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:24 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:24 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:24 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.876881 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:24 crc kubenswrapper[4925]: I0121 10:59:24.893997 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9358efee-87a3-49bf-a75c-a45dc2ac2987-kube-api-access\") pod \"installer-9-crc\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:25 crc kubenswrapper[4925]: I0121 10:59:25.078260 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 21 10:59:25 crc kubenswrapper[4925]: I0121 10:59:25.876030 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:25 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:25 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:25 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:25 crc kubenswrapper[4925]: I0121 10:59:25.876291 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:26 crc kubenswrapper[4925]: I0121 10:59:26.874868 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:26 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:26 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:26 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:26 crc kubenswrapper[4925]: I0121 10:59:26.875571 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:27 crc kubenswrapper[4925]: I0121 10:59:27.970920 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:27 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:27 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:27 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:27 crc kubenswrapper[4925]: I0121 10:59:27.971011 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:28 crc kubenswrapper[4925]: I0121 10:59:28.978519 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:28 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:28 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:28 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:28 crc kubenswrapper[4925]: I0121 10:59:28.978619 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:29 crc kubenswrapper[4925]: I0121 10:59:29.891379 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router 
namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:29 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:29 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:29 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:29 crc kubenswrapper[4925]: I0121 10:59:29.891473 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:30 crc kubenswrapper[4925]: I0121 10:59:30.902037 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:30 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:30 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:30 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:30 crc kubenswrapper[4925]: I0121 10:59:30.902473 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:32 crc kubenswrapper[4925]: I0121 10:59:32.172064 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:32 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:32 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:32 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:32 crc kubenswrapper[4925]: I0121 10:59:32.172142 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:32 crc kubenswrapper[4925]: I0121 10:59:32.875289 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:32 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:32 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:32 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:32 crc kubenswrapper[4925]: I0121 10:59:32.875412 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:34 crc kubenswrapper[4925]: I0121 10:59:34.478238 4925 patch_prober.go:28] interesting pod/console-f9d7485db-7lrsj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 21 10:59:34 crc kubenswrapper[4925]: I0121 10:59:34.480288 
4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7lrsj" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 21 10:59:34 crc kubenswrapper[4925]: I0121 10:59:34.479892 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:34 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:34 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:34 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:34 crc kubenswrapper[4925]: I0121 10:59:34.481259 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:34 crc kubenswrapper[4925]: I0121 10:59:34.486730 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:34 crc kubenswrapper[4925]: I0121 10:59:34.486847 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:59:34 crc kubenswrapper[4925]: I0121 10:59:34.881097 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:34 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:34 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:34 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:34 crc kubenswrapper[4925]: I0121 10:59:34.881218 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:35 crc kubenswrapper[4925]: I0121 10:59:35.874220 4925 patch_prober.go:28] interesting pod/router-default-5444994796-n2k47 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 10:59:35 crc kubenswrapper[4925]: [-]has-synced failed: reason withheld Jan 21 10:59:35 crc kubenswrapper[4925]: [+]process-running ok Jan 21 10:59:35 crc kubenswrapper[4925]: healthz check failed Jan 21 10:59:35 crc kubenswrapper[4925]: I0121 10:59:35.874359 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-n2k47" podUID="fb3fdc07-c6f5-4330-8b00-e454c98ef11d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 10:59:36 crc kubenswrapper[4925]: I0121 10:59:36.875049 
4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:59:36 crc kubenswrapper[4925]: I0121 10:59:36.878664 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-n2k47" Jan 21 10:59:44 crc kubenswrapper[4925]: I0121 10:59:44.034485 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:44 crc kubenswrapper[4925]: I0121 10:59:44.034597 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 10:59:44 crc kubenswrapper[4925]: I0121 10:59:44.035629 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:59:44 crc kubenswrapper[4925]: I0121 10:59:44.046117 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 10:59:51 crc kubenswrapper[4925]: I0121 10:59:51.963284 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:51.964845 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:51.965052 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:51.965258 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:51.967543 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:51.967976 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 21 10:59:53 crc 
kubenswrapper[4925]: I0121 10:59:51.968177 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:52.008580 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:52.030950 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:52.036595 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:52.043259 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:52.043684 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:52.118884 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:52.129664 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 10:59:53 crc kubenswrapper[4925]: I0121 10:59:52.336550 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 10:59:54 crc kubenswrapper[4925]: I0121 10:59:54.036055 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 10:59:54 crc kubenswrapper[4925]: I0121 10:59:54.036208 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.243363 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp"] Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.249466 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.252567 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp"] Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.254821 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.255289 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.338673 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jm9f\" (UniqueName: \"kubernetes.io/projected/50f7361b-c2aa-49d1-8300-88ccc99af201-kube-api-access-7jm9f\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.338930 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50f7361b-c2aa-49d1-8300-88ccc99af201-config-volume\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.339067 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50f7361b-c2aa-49d1-8300-88ccc99af201-secret-volume\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.440199 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50f7361b-c2aa-49d1-8300-88ccc99af201-config-volume\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 
21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.440276 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50f7361b-c2aa-49d1-8300-88ccc99af201-secret-volume\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.440350 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jm9f\" (UniqueName: \"kubernetes.io/projected/50f7361b-c2aa-49d1-8300-88ccc99af201-kube-api-access-7jm9f\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.441656 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50f7361b-c2aa-49d1-8300-88ccc99af201-config-volume\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.459733 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50f7361b-c2aa-49d1-8300-88ccc99af201-secret-volume\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.462437 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jm9f\" (UniqueName: \"kubernetes.io/projected/50f7361b-c2aa-49d1-8300-88ccc99af201-kube-api-access-7jm9f\") pod \"collect-profiles-29483220-hx8hp\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:00 crc kubenswrapper[4925]: I0121 11:00:00.590956 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:03 crc kubenswrapper[4925]: I0121 11:00:03.717284 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 21 11:00:04 crc kubenswrapper[4925]: I0121 11:00:04.037544 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:04 crc kubenswrapper[4925]: I0121 11:00:04.038132 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:07 crc kubenswrapper[4925]: E0121 11:00:07.232117 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 21 11:00:07 crc kubenswrapper[4925]: E0121 11:00:07.232949 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2kkp5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-x5pnh_openshift-marketplace(970344f4-64f6-4ffc-9896-6dd169ca1553): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 11:00:07 crc kubenswrapper[4925]: E0121 11:00:07.234631 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-x5pnh" 
podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" Jan 21 11:00:07 crc kubenswrapper[4925]: E0121 11:00:07.640784 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 21 11:00:07 crc kubenswrapper[4925]: E0121 11:00:07.641720 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-45hql,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-d9qfn_openshift-marketplace(c59d1347-a48d-4337-a8d1-2e5bef1f4535): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 11:00:07 crc kubenswrapper[4925]: E0121 11:00:07.642991 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-d9qfn" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" Jan 21 11:00:09 crc kubenswrapper[4925]: I0121 11:00:09.115702 4925 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 21 11:00:09 crc kubenswrapper[4925]: E0121 11:00:09.702781 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 21 11:00:09 crc kubenswrapper[4925]: E0121 11:00:09.703088 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f6b8h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-xq95p_openshift-marketplace(e4de47a6-b14d-4651-8568-49845b60ee7e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 11:00:09 crc kubenswrapper[4925]: E0121 11:00:09.704286 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-xq95p" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" Jan 21 11:00:12 crc kubenswrapper[4925]: E0121 11:00:12.189983 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 21 11:00:12 crc kubenswrapper[4925]: E0121 11:00:12.190889 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xz8gh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-p5std_openshift-marketplace(88c0c83d-a22b-4150-9572-ee68fb5f1e81): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 11:00:12 crc kubenswrapper[4925]: E0121 11:00:12.192223 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-p5std" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" Jan 21 11:00:14 crc kubenswrapper[4925]: I0121 11:00:14.032595 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:14 crc kubenswrapper[4925]: I0121 11:00:14.032769 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.715701 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-xq95p" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.716368 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-d9qfn" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.716476 4925 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-x5pnh" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.728114 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-p5std" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" Jan 21 11:00:16 crc kubenswrapper[4925]: W0121 11:00:16.730493 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-4a3baeb8247095e07c39c8bd0383332406891a40c75365da3a4fbf60b4f0ad7b WatchSource:0}: Error finding container 4a3baeb8247095e07c39c8bd0383332406891a40c75365da3a4fbf60b4f0ad7b: Status 404 returned error can't find the container with id 4a3baeb8247095e07c39c8bd0383332406891a40c75365da3a4fbf60b4f0ad7b Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.818164 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.818436 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gqj7h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-qt57g_openshift-marketplace(3afd79f3-5455-427f-a278-62309cd643ec): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.819704 4925 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-qt57g" podUID="3afd79f3-5455-427f-a278-62309cd643ec" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.831300 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.831564 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sx98h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-4w5bk_openshift-marketplace(f6d949bc-f771-4100-8afa-ff89f3da97d7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 11:00:16 crc kubenswrapper[4925]: E0121 11:00:16.832823 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-4w5bk" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" Jan 21 11:00:17 crc kubenswrapper[4925]: I0121 11:00:17.186954 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"4a3baeb8247095e07c39c8bd0383332406891a40c75365da3a4fbf60b4f0ad7b"} Jan 21 11:00:17 crc kubenswrapper[4925]: I0121 11:00:17.196727 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" 
event={"ID":"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7","Type":"ContainerStarted","Data":"d220b3636c2a9aa1dd4404ab1ee2fef912adccdc39ef7c0f0f32861251d9969b"} Jan 21 11:00:17 crc kubenswrapper[4925]: E0121 11:00:17.203784 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-qt57g" podUID="3afd79f3-5455-427f-a278-62309cd643ec" Jan 21 11:00:17 crc kubenswrapper[4925]: E0121 11:00:17.220862 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-4w5bk" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" Jan 21 11:00:17 crc kubenswrapper[4925]: E0121 11:00:17.249225 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 21 11:00:17 crc kubenswrapper[4925]: E0121 11:00:17.250108 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mg7xq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-k9xnv_openshift-marketplace(4c65dfb5-99b9-4899-9a86-b9e05194e9a4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 11:00:17 crc kubenswrapper[4925]: E0121 11:00:17.251671 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-k9xnv" 
podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" Jan 21 11:00:17 crc kubenswrapper[4925]: E0121 11:00:17.389535 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 21 11:00:17 crc kubenswrapper[4925]: E0121 11:00:17.390295 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f9csw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-5kqxm_openshift-marketplace(758a7d1b-c327-42ee-a585-efa49ec90d5e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 11:00:17 crc kubenswrapper[4925]: E0121 11:00:17.391529 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-5kqxm" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" Jan 21 11:00:17 crc kubenswrapper[4925]: W0121 11:00:17.393647 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-19e6384400434aae2f5c1c8d6d61f3ea59346e7b73f7d96359b10b183a4be0e0 WatchSource:0}: Error finding container 19e6384400434aae2f5c1c8d6d61f3ea59346e7b73f7d96359b10b183a4be0e0: Status 404 returned error can't find the container with id 19e6384400434aae2f5c1c8d6d61f3ea59346e7b73f7d96359b10b183a4be0e0 Jan 21 11:00:17 crc kubenswrapper[4925]: I0121 11:00:17.625718 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp"] Jan 21 11:00:17 crc kubenswrapper[4925]: W0121 11:00:17.632039 4925 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50f7361b_c2aa_49d1_8300_88ccc99af201.slice/crio-3b002306fbd8c61b03c4d0862ce244452ac877f8387a3ba1a2dedc1e15e9df43 WatchSource:0}: Error finding container 3b002306fbd8c61b03c4d0862ce244452ac877f8387a3ba1a2dedc1e15e9df43: Status 404 returned error can't find the container with id 3b002306fbd8c61b03c4d0862ce244452ac877f8387a3ba1a2dedc1e15e9df43 Jan 21 11:00:17 crc kubenswrapper[4925]: W0121 11:00:17.733171 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-c2aa1924545b1d4544b3a86cfdb081a4dd5130df2b53b99de0a887facc3a8f90 WatchSource:0}: Error finding container c2aa1924545b1d4544b3a86cfdb081a4dd5130df2b53b99de0a887facc3a8f90: Status 404 returned error can't find the container with id c2aa1924545b1d4544b3a86cfdb081a4dd5130df2b53b99de0a887facc3a8f90 Jan 21 11:00:17 crc kubenswrapper[4925]: I0121 11:00:17.747826 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 21 11:00:17 crc kubenswrapper[4925]: W0121 11:00:17.779028 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod9358efee_87a3_49bf_a75c_a45dc2ac2987.slice/crio-7c3bfe9470dbb0bc36a305c7b36af1fd3ccf79317ee1538053cd5db26e231378 WatchSource:0}: Error finding container 7c3bfe9470dbb0bc36a305c7b36af1fd3ccf79317ee1538053cd5db26e231378: Status 404 returned error can't find the container with id 7c3bfe9470dbb0bc36a305c7b36af1fd3ccf79317ee1538053cd5db26e231378 Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.207331 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"7d15779caa6e5b388f79a4466fbe1abe55140d18037403d8c0435912eed61b60"} Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.209131 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" event={"ID":"50f7361b-c2aa-49d1-8300-88ccc99af201","Type":"ContainerStarted","Data":"3b002306fbd8c61b03c4d0862ce244452ac877f8387a3ba1a2dedc1e15e9df43"} Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.211668 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vw8cb" event={"ID":"890e3b6e-bd8d-438c-992b-508bb751bdca","Type":"ContainerStarted","Data":"e152775e118bb31ae036075307031f395ba956e6b2fe120df78b6ebc69c0f468"} Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.211937 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.212663 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.212740 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 
11:00:18.214492 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c2aa1924545b1d4544b3a86cfdb081a4dd5130df2b53b99de0a887facc3a8f90"} Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.215550 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"b62d847f76b14e1292eb735efa4651f2762fdaf6ca02aed3b2e343b6eecc2aac"} Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.215578 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"19e6384400434aae2f5c1c8d6d61f3ea59346e7b73f7d96359b10b183a4be0e0"} Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.218219 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9358efee-87a3-49bf-a75c-a45dc2ac2987","Type":"ContainerStarted","Data":"7c3bfe9470dbb0bc36a305c7b36af1fd3ccf79317ee1538053cd5db26e231378"} Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.219920 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"8832796ff7487c55a4cfaf5382cc3a15bdcbdb74e8f14c4614fd62862b8e3fd3"} Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.220044 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.221639 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7","Type":"ContainerStarted","Data":"55b524fd13831ab39d327b270249215c7708b3309be48eda33dbfde682137a6b"} Jan 21 11:00:18 crc kubenswrapper[4925]: E0121 11:00:18.225207 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-5kqxm" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" Jan 21 11:00:18 crc kubenswrapper[4925]: E0121 11:00:18.226590 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-k9xnv" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" Jan 21 11:00:18 crc kubenswrapper[4925]: I0121 11:00:18.332583 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=59.332539101 podStartE2EDuration="59.332539101s" podCreationTimestamp="2026-01-21 10:59:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:00:18.303470771 +0000 UTC m=+309.907362715" watchObservedRunningTime="2026-01-21 11:00:18.332539101 +0000 UTC m=+309.936431035" Jan 21 11:00:19 crc kubenswrapper[4925]: 
I0121 11:00:19.229810 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" event={"ID":"50f7361b-c2aa-49d1-8300-88ccc99af201","Type":"ContainerStarted","Data":"d43006dfb4bcda468e86fda26ee219adf1b78ffafa2f5c1dd431af6708c79fe7"} Jan 21 11:00:19 crc kubenswrapper[4925]: I0121 11:00:19.234490 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"37d70d268889702ee1245d30563d5d825e177a78847cb5d2fb51867b7e12ee57"} Jan 21 11:00:19 crc kubenswrapper[4925]: I0121 11:00:19.238542 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9358efee-87a3-49bf-a75c-a45dc2ac2987","Type":"ContainerStarted","Data":"42b41f40eb7d7eb98c1f99d51bd9733d184fd1f8e0a8a28bb7bf74e4dff248f3"} Jan 21 11:00:19 crc kubenswrapper[4925]: I0121 11:00:19.239873 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:19 crc kubenswrapper[4925]: I0121 11:00:19.239965 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:19 crc kubenswrapper[4925]: I0121 11:00:19.253421 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" podStartSLOduration=19.253379226 podStartE2EDuration="19.253379226s" podCreationTimestamp="2026-01-21 11:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:00:19.249496877 +0000 UTC m=+310.853388811" watchObservedRunningTime="2026-01-21 11:00:19.253379226 +0000 UTC m=+310.857271160" Jan 21 11:00:19 crc kubenswrapper[4925]: I0121 11:00:19.295304 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=55.295272944 podStartE2EDuration="55.295272944s" podCreationTimestamp="2026-01-21 10:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:00:19.290333009 +0000 UTC m=+310.894224953" watchObservedRunningTime="2026-01-21 11:00:19.295272944 +0000 UTC m=+310.899164878" Jan 21 11:00:20 crc kubenswrapper[4925]: I0121 11:00:20.246967 4925 generic.go:334] "Generic (PLEG): container finished" podID="f4d365b7-841c-44cb-ab4d-9fcb8493ffd7" containerID="55b524fd13831ab39d327b270249215c7708b3309be48eda33dbfde682137a6b" exitCode=0 Jan 21 11:00:20 crc kubenswrapper[4925]: I0121 11:00:20.247072 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7","Type":"ContainerDied","Data":"55b524fd13831ab39d327b270249215c7708b3309be48eda33dbfde682137a6b"} Jan 21 11:00:20 crc kubenswrapper[4925]: I0121 11:00:20.251921 4925 generic.go:334] "Generic (PLEG): container finished" 
podID="50f7361b-c2aa-49d1-8300-88ccc99af201" containerID="d43006dfb4bcda468e86fda26ee219adf1b78ffafa2f5c1dd431af6708c79fe7" exitCode=0 Jan 21 11:00:20 crc kubenswrapper[4925]: I0121 11:00:20.252055 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" event={"ID":"50f7361b-c2aa-49d1-8300-88ccc99af201","Type":"ContainerDied","Data":"d43006dfb4bcda468e86fda26ee219adf1b78ffafa2f5c1dd431af6708c79fe7"} Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.706293 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.713298 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.735226 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jm9f\" (UniqueName: \"kubernetes.io/projected/50f7361b-c2aa-49d1-8300-88ccc99af201-kube-api-access-7jm9f\") pod \"50f7361b-c2aa-49d1-8300-88ccc99af201\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.735366 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kube-api-access\") pod \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\" (UID: \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\") " Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.735758 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kubelet-dir\") pod \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\" (UID: \"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7\") " Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.735805 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50f7361b-c2aa-49d1-8300-88ccc99af201-secret-volume\") pod \"50f7361b-c2aa-49d1-8300-88ccc99af201\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.735843 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50f7361b-c2aa-49d1-8300-88ccc99af201-config-volume\") pod \"50f7361b-c2aa-49d1-8300-88ccc99af201\" (UID: \"50f7361b-c2aa-49d1-8300-88ccc99af201\") " Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.736813 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50f7361b-c2aa-49d1-8300-88ccc99af201-config-volume" (OuterVolumeSpecName: "config-volume") pod "50f7361b-c2aa-49d1-8300-88ccc99af201" (UID: "50f7361b-c2aa-49d1-8300-88ccc99af201"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.736894 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "f4d365b7-841c-44cb-ab4d-9fcb8493ffd7" (UID: "f4d365b7-841c-44cb-ab4d-9fcb8493ffd7"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.745149 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50f7361b-c2aa-49d1-8300-88ccc99af201-kube-api-access-7jm9f" (OuterVolumeSpecName: "kube-api-access-7jm9f") pod "50f7361b-c2aa-49d1-8300-88ccc99af201" (UID: "50f7361b-c2aa-49d1-8300-88ccc99af201"). InnerVolumeSpecName "kube-api-access-7jm9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.745233 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50f7361b-c2aa-49d1-8300-88ccc99af201-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "50f7361b-c2aa-49d1-8300-88ccc99af201" (UID: "50f7361b-c2aa-49d1-8300-88ccc99af201"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.771269 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f4d365b7-841c-44cb-ab4d-9fcb8493ffd7" (UID: "f4d365b7-841c-44cb-ab4d-9fcb8493ffd7"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.837955 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.840224 4925 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4d365b7-841c-44cb-ab4d-9fcb8493ffd7-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.840467 4925 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50f7361b-c2aa-49d1-8300-88ccc99af201-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.840604 4925 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50f7361b-c2aa-49d1-8300-88ccc99af201-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:00:21 crc kubenswrapper[4925]: I0121 11:00:21.840700 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jm9f\" (UniqueName: \"kubernetes.io/projected/50f7361b-c2aa-49d1-8300-88ccc99af201-kube-api-access-7jm9f\") on node \"crc\" DevicePath \"\"" Jan 21 11:00:22 crc kubenswrapper[4925]: I0121 11:00:22.266908 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f4d365b7-841c-44cb-ab4d-9fcb8493ffd7","Type":"ContainerDied","Data":"d220b3636c2a9aa1dd4404ab1ee2fef912adccdc39ef7c0f0f32861251d9969b"} Jan 21 11:00:22 crc kubenswrapper[4925]: I0121 11:00:22.267343 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d220b3636c2a9aa1dd4404ab1ee2fef912adccdc39ef7c0f0f32861251d9969b" Jan 21 11:00:22 crc kubenswrapper[4925]: I0121 11:00:22.267005 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 11:00:22 crc kubenswrapper[4925]: I0121 11:00:22.268964 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" event={"ID":"50f7361b-c2aa-49d1-8300-88ccc99af201","Type":"ContainerDied","Data":"3b002306fbd8c61b03c4d0862ce244452ac877f8387a3ba1a2dedc1e15e9df43"} Jan 21 11:00:22 crc kubenswrapper[4925]: I0121 11:00:22.269110 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b002306fbd8c61b03c4d0862ce244452ac877f8387a3ba1a2dedc1e15e9df43" Jan 21 11:00:22 crc kubenswrapper[4925]: I0121 11:00:22.269254 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp" Jan 21 11:00:24 crc kubenswrapper[4925]: I0121 11:00:24.033269 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:24 crc kubenswrapper[4925]: I0121 11:00:24.033409 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:24 crc kubenswrapper[4925]: I0121 11:00:24.033476 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:24 crc kubenswrapper[4925]: I0121 11:00:24.033520 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:25 crc kubenswrapper[4925]: I0121 11:00:25.969786 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vwhv9"] Jan 21 11:00:34 crc kubenswrapper[4925]: I0121 11:00:34.034449 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:34 crc kubenswrapper[4925]: I0121 11:00:34.035490 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:34 crc kubenswrapper[4925]: I0121 11:00:34.034656 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:34 crc kubenswrapper[4925]: I0121 11:00:34.035623 4925 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:35 crc kubenswrapper[4925]: I0121 11:00:35.433520 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x5pnh" event={"ID":"970344f4-64f6-4ffc-9896-6dd169ca1553","Type":"ContainerStarted","Data":"728d8fa74c94bb1befa031d409529d33781debb2fb0064707127fd558087063c"} Jan 21 11:00:35 crc kubenswrapper[4925]: I0121 11:00:35.449996 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d9qfn" event={"ID":"c59d1347-a48d-4337-a8d1-2e5bef1f4535","Type":"ContainerStarted","Data":"20e7ea7a608ff732478ad8653d4077dbf0324eb00edd384ed14c0d56e13903ea"} Jan 21 11:00:35 crc kubenswrapper[4925]: I0121 11:00:35.458571 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xq95p" event={"ID":"e4de47a6-b14d-4651-8568-49845b60ee7e","Type":"ContainerStarted","Data":"7e00837a1ae03655737bb4fb656f706083284f752840680f548fb3ef5eeae620"} Jan 21 11:00:38 crc kubenswrapper[4925]: I0121 11:00:38.486581 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qt57g" event={"ID":"3afd79f3-5455-427f-a278-62309cd643ec","Type":"ContainerStarted","Data":"325c6df9295d5f56b5ae11f2e589bf763983fa015eaefebd533498a549202626"} Jan 21 11:00:38 crc kubenswrapper[4925]: I0121 11:00:38.490731 4925 generic.go:334] "Generic (PLEG): container finished" podID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerID="7e00837a1ae03655737bb4fb656f706083284f752840680f548fb3ef5eeae620" exitCode=0 Jan 21 11:00:38 crc kubenswrapper[4925]: I0121 11:00:38.491070 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xq95p" event={"ID":"e4de47a6-b14d-4651-8568-49845b60ee7e","Type":"ContainerDied","Data":"7e00837a1ae03655737bb4fb656f706083284f752840680f548fb3ef5eeae620"} Jan 21 11:00:38 crc kubenswrapper[4925]: I0121 11:00:38.499440 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5bk" event={"ID":"f6d949bc-f771-4100-8afa-ff89f3da97d7","Type":"ContainerStarted","Data":"650f790578afacf55003bd3d670984f6027d7a772254f4d691fead59f209c71d"} Jan 21 11:00:38 crc kubenswrapper[4925]: I0121 11:00:38.519273 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5std" event={"ID":"88c0c83d-a22b-4150-9572-ee68fb5f1e81","Type":"ContainerStarted","Data":"f012b32d2fc38a6091db0decd59a9c22bc6502a45cd0bb1fc9ee8e6edf68507b"} Jan 21 11:00:39 crc kubenswrapper[4925]: I0121 11:00:39.816569 4925 generic.go:334] "Generic (PLEG): container finished" podID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerID="20e7ea7a608ff732478ad8653d4077dbf0324eb00edd384ed14c0d56e13903ea" exitCode=0 Jan 21 11:00:39 crc kubenswrapper[4925]: I0121 11:00:39.835710 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d9qfn" event={"ID":"c59d1347-a48d-4337-a8d1-2e5bef1f4535","Type":"ContainerDied","Data":"20e7ea7a608ff732478ad8653d4077dbf0324eb00edd384ed14c0d56e13903ea"} Jan 21 11:00:40 crc kubenswrapper[4925]: I0121 11:00:40.855118 4925 generic.go:334] "Generic (PLEG): container finished" podID="f6d949bc-f771-4100-8afa-ff89f3da97d7" 
containerID="650f790578afacf55003bd3d670984f6027d7a772254f4d691fead59f209c71d" exitCode=0 Jan 21 11:00:40 crc kubenswrapper[4925]: I0121 11:00:40.855223 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5bk" event={"ID":"f6d949bc-f771-4100-8afa-ff89f3da97d7","Type":"ContainerDied","Data":"650f790578afacf55003bd3d670984f6027d7a772254f4d691fead59f209c71d"} Jan 21 11:00:43 crc kubenswrapper[4925]: I0121 11:00:43.018224 4925 generic.go:334] "Generic (PLEG): container finished" podID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerID="728d8fa74c94bb1befa031d409529d33781debb2fb0064707127fd558087063c" exitCode=0 Jan 21 11:00:43 crc kubenswrapper[4925]: I0121 11:00:43.018368 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x5pnh" event={"ID":"970344f4-64f6-4ffc-9896-6dd169ca1553","Type":"ContainerDied","Data":"728d8fa74c94bb1befa031d409529d33781debb2fb0064707127fd558087063c"} Jan 21 11:00:43 crc kubenswrapper[4925]: I0121 11:00:43.025563 4925 generic.go:334] "Generic (PLEG): container finished" podID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerID="f012b32d2fc38a6091db0decd59a9c22bc6502a45cd0bb1fc9ee8e6edf68507b" exitCode=0 Jan 21 11:00:43 crc kubenswrapper[4925]: I0121 11:00:43.025639 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5std" event={"ID":"88c0c83d-a22b-4150-9572-ee68fb5f1e81","Type":"ContainerDied","Data":"f012b32d2fc38a6091db0decd59a9c22bc6502a45cd0bb1fc9ee8e6edf68507b"} Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.032995 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.033166 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.033231 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.033340 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.033441 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.034429 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"e152775e118bb31ae036075307031f395ba956e6b2fe120df78b6ebc69c0f468"} pod="openshift-console/downloads-7954f5f757-vw8cb" containerMessage="Container download-server failed liveness probe, will be restarted" 
Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.034497 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" containerID="cri-o://e152775e118bb31ae036075307031f395ba956e6b2fe120df78b6ebc69c0f468" gracePeriod=2 Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.034456 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:44 crc kubenswrapper[4925]: I0121 11:00:44.034589 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:45 crc kubenswrapper[4925]: I0121 11:00:45.063830 4925 generic.go:334] "Generic (PLEG): container finished" podID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerID="e152775e118bb31ae036075307031f395ba956e6b2fe120df78b6ebc69c0f468" exitCode=0 Jan 21 11:00:45 crc kubenswrapper[4925]: I0121 11:00:45.063989 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vw8cb" event={"ID":"890e3b6e-bd8d-438c-992b-508bb751bdca","Type":"ContainerDied","Data":"e152775e118bb31ae036075307031f395ba956e6b2fe120df78b6ebc69c0f468"} Jan 21 11:00:45 crc kubenswrapper[4925]: I0121 11:00:45.064329 4925 scope.go:117] "RemoveContainer" containerID="d02d30a2c5b9a29447aa853d2f8b668e61f66393bc67c40778bd298dfa8f1995" Jan 21 11:00:47 crc kubenswrapper[4925]: I0121 11:00:47.080318 4925 generic.go:334] "Generic (PLEG): container finished" podID="3afd79f3-5455-427f-a278-62309cd643ec" containerID="325c6df9295d5f56b5ae11f2e589bf763983fa015eaefebd533498a549202626" exitCode=0 Jan 21 11:00:47 crc kubenswrapper[4925]: I0121 11:00:47.080881 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qt57g" event={"ID":"3afd79f3-5455-427f-a278-62309cd643ec","Type":"ContainerDied","Data":"325c6df9295d5f56b5ae11f2e589bf763983fa015eaefebd533498a549202626"} Jan 21 11:00:51 crc kubenswrapper[4925]: I0121 11:00:51.065811 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" containerID="cri-o://b0af228001ee8dd1a524a54390aff7f32360db1a3f5c86cf859a82ff5638775b" gracePeriod=15 Jan 21 11:00:52 crc kubenswrapper[4925]: I0121 11:00:52.179445 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 11:00:54 crc kubenswrapper[4925]: I0121 11:00:54.033366 4925 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vwhv9 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" start-of-body= Jan 21 11:00:54 crc kubenswrapper[4925]: I0121 11:00:54.033491 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" 
containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" Jan 21 11:00:54 crc kubenswrapper[4925]: I0121 11:00:54.033553 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:00:54 crc kubenswrapper[4925]: I0121 11:00:54.033640 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:00:55 crc kubenswrapper[4925]: I0121 11:00:55.147513 4925 generic.go:334] "Generic (PLEG): container finished" podID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerID="b0af228001ee8dd1a524a54390aff7f32360db1a3f5c86cf859a82ff5638775b" exitCode=0 Jan 21 11:00:55 crc kubenswrapper[4925]: I0121 11:00:55.147640 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" event={"ID":"b4eed50b-ef22-4637-9aa1-d8528310aed1","Type":"ContainerDied","Data":"b0af228001ee8dd1a524a54390aff7f32360db1a3f5c86cf859a82ff5638775b"} Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.278815 4925 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.279455 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50f7361b-c2aa-49d1-8300-88ccc99af201" containerName="collect-profiles" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.279482 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="50f7361b-c2aa-49d1-8300-88ccc99af201" containerName="collect-profiles" Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.279547 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4d365b7-841c-44cb-ab4d-9fcb8493ffd7" containerName="pruner" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.279560 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4d365b7-841c-44cb-ab4d-9fcb8493ffd7" containerName="pruner" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.279766 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4d365b7-841c-44cb-ab4d-9fcb8493ffd7" containerName="pruner" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.279803 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="50f7361b-c2aa-49d1-8300-88ccc99af201" containerName="collect-profiles" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.280485 4925 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.280738 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.280922 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d" gracePeriod=15 Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.280966 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d" gracePeriod=15 Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.281142 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05" gracePeriod=15 Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.281177 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d" gracePeriod=15 Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.281081 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae" gracePeriod=15 Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.282343 4925 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.283070 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283091 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.283132 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283141 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.283155 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283162 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.283200 4925 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283208 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.283219 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283225 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.283235 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283242 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 21 11:00:56 crc kubenswrapper[4925]: E0121 11:00:56.283252 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283280 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283583 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283601 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283615 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283712 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283721 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.283727 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.337758 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.439282 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.439882 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.440091 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.440210 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.440336 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.440485 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.440627 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.440767 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.541290 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542045 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542243 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542438 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542523 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542096 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542611 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542026 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542794 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.542905 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.543099 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.543205 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.543191 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.543256 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.543540 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.543557 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:00:56 crc kubenswrapper[4925]: I0121 11:00:56.634190 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:00:57 crc kubenswrapper[4925]: I0121 11:00:57.166123 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 21 11:00:57 crc kubenswrapper[4925]: I0121 11:00:57.168190 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 11:00:57 crc kubenswrapper[4925]: I0121 11:00:57.169331 4925 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05" exitCode=2 Jan 21 11:00:57 crc kubenswrapper[4925]: E0121 11:00:57.418768 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad" Jan 21 11:00:57 crc kubenswrapper[4925]: E0121 11:00:57.422646 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad,Command:[/bin/opm],Args:[serve /extracted-catalog/catalog --cache-dir=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOMEMLIMIT,Value:120MiB,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{125829120 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2kkp5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe 
-addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-x5pnh_openshift-marketplace(970344f4-64f6-4ffc-9896-6dd169ca1553): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 11:00:57 crc kubenswrapper[4925]: E0121 11:00:57.422910 4925 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.113:6443: connect: connection refused" event="&Event{ObjectMeta:{community-operators-x5pnh.188cb9febeac761f openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:community-operators-x5pnh,UID:970344f4-64f6-4ffc-9896-6dd169ca1553,APIVersion:v1,ResourceVersion:28384,FieldPath:spec.containers{registry-server},},Reason:Failed,Message:Failed to pull image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\": rpc error: code = Canceled desc = copying config: context canceled,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 11:00:57.421567519 +0000 UTC m=+349.025459473,LastTimestamp:2026-01-21 11:00:57.421567519 +0000 UTC m=+349.025459473,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 21 11:00:57 crc kubenswrapper[4925]: E0121 11:00:57.426176 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openshift-marketplace/community-operators-x5pnh" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" Jan 21 11:00:58 crc kubenswrapper[4925]: I0121 11:00:58.183627 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 21 11:00:58 crc kubenswrapper[4925]: I0121 11:00:58.185384 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 11:00:58 crc kubenswrapper[4925]: I0121 11:00:58.186492 4925 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d" exitCode=0 Jan 21 11:00:58 crc kubenswrapper[4925]: E0121 11:00:58.883376 4925 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:00:58 crc kubenswrapper[4925]: E0121 11:00:58.883978 4925 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:00:58 crc kubenswrapper[4925]: E0121 11:00:58.885016 4925 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:00:58 crc kubenswrapper[4925]: E0121 11:00:58.885647 4925 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:00:58 crc kubenswrapper[4925]: E0121 11:00:58.886055 4925 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:00:58 crc kubenswrapper[4925]: I0121 11:00:58.886114 4925 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 21 11:00:58 crc kubenswrapper[4925]: E0121 11:00:58.886858 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="200ms" Jan 21 11:00:59 crc kubenswrapper[4925]: E0121 11:00:59.096169 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="400ms" Jan 21 11:00:59 crc kubenswrapper[4925]: E0121 11:00:59.498835 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="800ms" Jan 21 11:00:59 crc kubenswrapper[4925]: I0121 11:00:59.505998 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.206187 4925 generic.go:334] "Generic (PLEG): container finished" podID="9358efee-87a3-49bf-a75c-a45dc2ac2987" containerID="42b41f40eb7d7eb98c1f99d51bd9733d184fd1f8e0a8a28bb7bf74e4dff248f3" exitCode=0 Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.206705 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9358efee-87a3-49bf-a75c-a45dc2ac2987","Type":"ContainerDied","Data":"42b41f40eb7d7eb98c1f99d51bd9733d184fd1f8e0a8a28bb7bf74e4dff248f3"} Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.207714 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.208213 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.216637 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.220683 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.222485 4925 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d" exitCode=0 Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.222530 4925 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae" exitCode=0 Jan 21 11:01:00 crc kubenswrapper[4925]: I0121 11:01:00.222581 4925 scope.go:117] "RemoveContainer" containerID="80d92d06700d05dd2c80c17a20045530bb6d69b2e266382f5f456dfdd3c40b30" Jan 21 11:01:00 crc kubenswrapper[4925]: E0121 11:01:00.299762 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="1.6s" Jan 21 11:01:01 crc kubenswrapper[4925]: E0121 11:01:01.901217 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="3.2s" Jan 21 11:01:03 crc kubenswrapper[4925]: I0121 11:01:03.253775 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 11:01:03 crc kubenswrapper[4925]: I0121 11:01:03.255462 4925 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d" exitCode=0 Jan 21 11:01:04 crc kubenswrapper[4925]: I0121 11:01:04.033792 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:01:04 crc kubenswrapper[4925]: I0121 11:01:04.033921 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:01:04 crc kubenswrapper[4925]: E0121 11:01:04.170567 4925 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.113:6443: connect: connection refused" 
event="&Event{ObjectMeta:{community-operators-x5pnh.188cb9febeac761f openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:community-operators-x5pnh,UID:970344f4-64f6-4ffc-9896-6dd169ca1553,APIVersion:v1,ResourceVersion:28384,FieldPath:spec.containers{registry-server},},Reason:Failed,Message:Failed to pull image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\": rpc error: code = Canceled desc = copying config: context canceled,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 11:00:57.421567519 +0000 UTC m=+349.025459473,LastTimestamp:2026-01-21 11:00:57.421567519 +0000 UTC m=+349.025459473,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 21 11:01:05 crc kubenswrapper[4925]: I0121 11:01:05.033906 4925 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vwhv9 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.20:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 21 11:01:05 crc kubenswrapper[4925]: I0121 11:01:05.034059 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.20:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 21 11:01:05 crc kubenswrapper[4925]: E0121 11:01:05.103493 4925 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="6.4s" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.640687 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.642243 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.643016 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.643857 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.649741 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.650847 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.651285 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.652056 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.660054 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.662298 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.663284 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.663854 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.667752 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.668452 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.690967 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-ocp-branding-template\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691035 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691076 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-error\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691097 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-service-ca\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691118 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-trusted-ca-bundle\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: 
\"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691145 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-serving-cert\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691177 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691197 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9358efee-87a3-49bf-a75c-a45dc2ac2987-kube-api-access\") pod \"9358efee-87a3-49bf-a75c-a45dc2ac2987\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691237 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-policies\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691255 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-var-lock\") pod \"9358efee-87a3-49bf-a75c-a45dc2ac2987\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691277 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6zm6\" (UniqueName: \"kubernetes.io/projected/b4eed50b-ef22-4637-9aa1-d8528310aed1-kube-api-access-c6zm6\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691257 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691297 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-kubelet-dir\") pod \"9358efee-87a3-49bf-a75c-a45dc2ac2987\" (UID: \"9358efee-87a3-49bf-a75c-a45dc2ac2987\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691351 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9358efee-87a3-49bf-a75c-a45dc2ac2987" (UID: "9358efee-87a3-49bf-a75c-a45dc2ac2987"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691446 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-session\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691485 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691543 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-router-certs\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691579 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-login\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691614 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691642 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-provider-selection\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691670 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-var-lock" (OuterVolumeSpecName: "var-lock") pod "9358efee-87a3-49bf-a75c-a45dc2ac2987" (UID: "9358efee-87a3-49bf-a75c-a45dc2ac2987"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691689 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-dir\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.691724 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-cliconfig\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.692315 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-idp-0-file-data\") pod \"b4eed50b-ef22-4637-9aa1-d8528310aed1\" (UID: \"b4eed50b-ef22-4637-9aa1-d8528310aed1\") " Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.693382 4925 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-var-lock\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.693440 4925 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9358efee-87a3-49bf-a75c-a45dc2ac2987-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.693452 4925 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.693464 4925 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.694580 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.695045 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.696151 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.696244 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.700140 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.700373 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.701885 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4eed50b-ef22-4637-9aa1-d8528310aed1-kube-api-access-c6zm6" (OuterVolumeSpecName: "kube-api-access-c6zm6") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "kube-api-access-c6zm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.701870 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9358efee-87a3-49bf-a75c-a45dc2ac2987-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9358efee-87a3-49bf-a75c-a45dc2ac2987" (UID: "9358efee-87a3-49bf-a75c-a45dc2ac2987"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.703122 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.703641 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.704148 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.704545 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.705751 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.706531 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.706820 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.714684 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "b4eed50b-ef22-4637-9aa1-d8528310aed1" (UID: "b4eed50b-ef22-4637-9aa1-d8528310aed1"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:01:06 crc kubenswrapper[4925]: W0121 11:01:06.793047 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-41b3d060858c5d141f23df909d6fd1bfb3b0628e894771ddde80252de3ec05b4 WatchSource:0}: Error finding container 41b3d060858c5d141f23df909d6fd1bfb3b0628e894771ddde80252de3ec05b4: Status 404 returned error can't find the container with id 41b3d060858c5d141f23df909d6fd1bfb3b0628e894771ddde80252de3ec05b4 Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795274 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795320 4925 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795334 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795346 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795360 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795377 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795422 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795434 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795444 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795453 4925 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 21 
11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795465 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9358efee-87a3-49bf-a75c-a45dc2ac2987-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795475 4925 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b4eed50b-ef22-4637-9aa1-d8528310aed1-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795484 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6zm6\" (UniqueName: \"kubernetes.io/projected/b4eed50b-ef22-4637-9aa1-d8528310aed1-kube-api-access-c6zm6\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795514 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795523 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:06 crc kubenswrapper[4925]: I0121 11:01:06.795534 4925 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b4eed50b-ef22-4637-9aa1-d8528310aed1-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 21 11:01:07 crc kubenswrapper[4925]: E0121 11:01:07.052717 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T11:01:07Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T11:01:07Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T11:01:07Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T11:01:07Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:020b5bee2bbd09fbf64a1af808628bb76e9c70b9efdc49f38e5a50641590514c\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:78f8ee56f09c047b3acd7e5b6b8a0f9534952f418b658c9f5a6d45d12546e67c\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1670570239},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:985a76d8ebbdf8ece24003afb1d6ad0bf3e155bd005676f602d7f97cdad463c1\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:a52c9b1b8a47036a88322e4db1511ead83746d3ba41ce098059642099a09525e\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1202798827},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:2b72e40c5d5b36b681f40c16ebf3dcac6520ed0c79f174ba87f673ab7afd209a\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:d83ee77ad07e06451a84205ac4c85c69e912a1c975e1a8a95095d79218028dce\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1178956511},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:aae73aa11d44b8831c829464aa5515a56a9a8ef17926d54a010e0e9215ecd643\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:cd24673e95503ac856405941c96e75f11ca6da85fe80950e0dd00bb1062f9f47\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1166891762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\
\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: E0121 11:01:07.053775 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: E0121 11:01:07.054037 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: E0121 11:01:07.054295 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: E0121 11:01:07.054554 4925 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: E0121 11:01:07.054571 4925 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.291029 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kqxm" event={"ID":"758a7d1b-c327-42ee-a585-efa49ec90d5e","Type":"ContainerStarted","Data":"73e7e2ef08f80ead24699fb3d441128622d7fa05fb978ad51233a689cbca3352"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.292506 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.293275 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.293948 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.294649 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.295094 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: 
I0121 11:01:07.295933 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xq95p" event={"ID":"e4de47a6-b14d-4651-8568-49845b60ee7e","Type":"ContainerStarted","Data":"fefc9e19d31158aa9cc6d75f03c81a7dbd0d658311eb3f50178ea9268553c983"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.296783 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.297152 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.297425 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.297786 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.298338 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.298749 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.300216 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5bk" event={"ID":"f6d949bc-f771-4100-8afa-ff89f3da97d7","Type":"ContainerStarted","Data":"6089f50fd53569747272a97e61a346771ac54fc9a3588b31da9d82eef9546228"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.300858 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.301172 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" 
pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.301467 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.301716 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.301962 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.302204 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.302407 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.305901 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qt57g" event={"ID":"3afd79f3-5455-427f-a278-62309cd643ec","Type":"ContainerStarted","Data":"b01154a3e793f29856b682701b21a35617264abb4f2a0a8338a14f6f8729527d"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.307354 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.307654 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.307991 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" 
pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.308581 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.309098 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.309433 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.309645 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.309845 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.311143 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.312153 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" event={"ID":"b4eed50b-ef22-4637-9aa1-d8528310aed1","Type":"ContainerDied","Data":"787b4c037b08656b406fd5e3fb873a6d54f00ffb51fbb5fdcdc8f707a66b147d"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.312243 4925 scope.go:117] "RemoveContainer" containerID="b0af228001ee8dd1a524a54390aff7f32360db1a3f5c86cf859a82ff5638775b" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.313047 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.313557 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.313806 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.314017 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.314220 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.314447 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.315287 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.315946 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.330354 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vw8cb" event={"ID":"890e3b6e-bd8d-438c-992b-508bb751bdca","Type":"ContainerStarted","Data":"62d282ff5f13c91696846c269527418daba1f91ff7f15bf6c2502db4fc9cd44d"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.331328 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.331644 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.332069 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.332695 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.332785 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.332836 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.332911 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.333263 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.333546 4925 status_manager.go:851] 
"Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.333767 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.334094 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.334323 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.334689 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.335733 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.336234 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.336606 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.336932 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.337305 4925 status_manager.go:851] 
"Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.337601 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.337830 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.338051 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.342928 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d9qfn" event={"ID":"c59d1347-a48d-4337-a8d1-2e5bef1f4535","Type":"ContainerStarted","Data":"821b3521e48a32b1abb1a1ca0d1d34c8825715a62265b0c3149cc9870c101546"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.343976 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.344416 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.344941 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.345237 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.350253 4925 status_manager.go:851] "Failed to get status for pod" 
podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.350649 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.350819 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.351008 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.351262 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.351391 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9358efee-87a3-49bf-a75c-a45dc2ac2987","Type":"ContainerDied","Data":"7c3bfe9470dbb0bc36a305c7b36af1fd3ccf79317ee1538053cd5db26e231378"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.351494 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c3bfe9470dbb0bc36a305c7b36af1fd3ccf79317ee1538053cd5db26e231378" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.351482 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.351545 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.354212 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"bd324a95fef87895723cb01beb8c3aa59b0283ea7ea20fa43711478b91c060bf"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.354274 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"41b3d060858c5d141f23df909d6fd1bfb3b0628e894771ddde80252de3ec05b4"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.360162 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.361724 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.362159 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.362718 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.363179 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.363601 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.363888 4925 
status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.364249 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.367860 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.368592 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.369273 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9xnv" event={"ID":"4c65dfb5-99b9-4899-9a86-b9e05194e9a4","Type":"ContainerStarted","Data":"e8f2ee9293a890398d2b0260c4de1be357909e18f6beff9c3675dccaafe5ab61"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.370574 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.370810 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.371038 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.371265 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.371503 4925 status_manager.go:851] "Failed to get 
status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.371735 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.371993 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.372277 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.372556 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.372774 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.373046 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.373387 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.373772 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.374335 4925 
status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.374645 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.374927 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.375152 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.375447 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.375767 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.376147 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.376589 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.377015 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc 
kubenswrapper[4925]: I0121 11:01:07.377748 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5std" event={"ID":"88c0c83d-a22b-4150-9572-ee68fb5f1e81","Type":"ContainerStarted","Data":"d2554eeb38c550cc14043ec631fb50424a86e45219dbda416ddad7f0b4960b6d"} Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.378891 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.379053 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.379205 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.379377 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.379576 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.379730 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.379879 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.380034 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.380189 4925 
status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.380443 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.386144 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.391100 4925 scope.go:117] "RemoveContainer" containerID="4f23c34468eeeb75c7f63985d4d6cf476ba3705f5ebaa858643cbe22514df68d" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.391384 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.392256 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.413773 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.433335 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.437539 4925 scope.go:117] "RemoveContainer" containerID="a43e905a9a868b5fd3823f2d0a03c6ed377f23e618dfc5c438753aaea8f9d58d" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.452820 4925 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.472712 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 
11:01:07.476040 4925 scope.go:117] "RemoveContainer" containerID="26a806d31f69845b24244507722f3c1067a5ef93d808fc0f61a40ee59902c4ae" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.493047 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.509917 4925 scope.go:117] "RemoveContainer" containerID="d8acc939187459069bec4c616e022c363a67630434ce98d7488bea83a02a6a05" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.514497 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.521683 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.533163 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.543963 4925 scope.go:117] "RemoveContainer" containerID="b052eadddd3950299ea7966e3322e9f6dd12670de431abe58f92ca0b3de0d39d" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.552662 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.572328 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.592646 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.612833 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 
11:01:07.633069 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.634112 4925 scope.go:117] "RemoveContainer" containerID="4a3e3bb0dbfbc8aba4995d09ec962b827e5a0f928a972ec0f646c4e34837363f" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.653501 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.673246 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.693542 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.712751 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.732958 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.753423 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.772710 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.792915 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.812428 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.832914 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.853170 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:07 crc kubenswrapper[4925]: I0121 11:01:07.873052 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.403702 4925 generic.go:334] "Generic (PLEG): container finished" podID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerID="e8f2ee9293a890398d2b0260c4de1be357909e18f6beff9c3675dccaafe5ab61" exitCode=0 Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.403723 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9xnv" event={"ID":"4c65dfb5-99b9-4899-9a86-b9e05194e9a4","Type":"ContainerDied","Data":"e8f2ee9293a890398d2b0260c4de1be357909e18f6beff9c3675dccaafe5ab61"} Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.407209 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.407506 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.407703 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc 
kubenswrapper[4925]: I0121 11:01:08.408005 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.408726 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.409181 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.409867 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.410526 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.410799 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.410980 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.410999 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.411047 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:01:08 crc kubenswrapper[4925]: I0121 11:01:08.411223 4925 
status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.419631 4925 generic.go:334] "Generic (PLEG): container finished" podID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerID="73e7e2ef08f80ead24699fb3d441128622d7fa05fb978ad51233a689cbca3352" exitCode=0 Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.421218 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kqxm" event={"ID":"758a7d1b-c327-42ee-a585-efa49ec90d5e","Type":"ContainerDied","Data":"73e7e2ef08f80ead24699fb3d441128622d7fa05fb978ad51233a689cbca3352"} Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.422221 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.422435 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.422485 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.422546 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.423112 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.428290 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.428564 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection 
refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.428862 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.429449 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.429717 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.429951 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.430153 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.430448 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.502666 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.525245 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.525863 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.526102 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.526300 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.526581 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.527023 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.527676 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.528146 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.528687 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.529058 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.530914 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.531363 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.531629 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.532705 4925 status_manager.go:851] "Failed to get status for pod" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" pod="openshift-marketplace/community-operators-x5pnh" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-x5pnh\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.533070 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.534648 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.537894 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.538450 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" 
pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.538782 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.539045 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.539347 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.539649 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.541343 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.558989 4925 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ec09e32b-ba93-4d22-b975-2616c14ba9cd" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.559056 4925 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ec09e32b-ba93-4d22-b975-2616c14ba9cd" Jan 21 11:01:09 crc kubenswrapper[4925]: E0121 11:01:09.559356 4925 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:09 crc kubenswrapper[4925]: I0121 11:01:09.560114 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:09 crc kubenswrapper[4925]: W0121 11:01:09.593816 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-7b1a3c464cd1f27d60edd4a63d264e3d1881efbb5286e4dce1e1030ae2639bcf WatchSource:0}: Error finding container 7b1a3c464cd1f27d60edd4a63d264e3d1881efbb5286e4dce1e1030ae2639bcf: Status 404 returned error can't find the container with id 7b1a3c464cd1f27d60edd4a63d264e3d1881efbb5286e4dce1e1030ae2639bcf Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.428308 4925 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="f248d3f9428ee01590985f4461addb1afe35557919f0eb30d35ebfc21015fc5d" exitCode=0 Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.429692 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"f248d3f9428ee01590985f4461addb1afe35557919f0eb30d35ebfc21015fc5d"} Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.429736 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7b1a3c464cd1f27d60edd4a63d264e3d1881efbb5286e4dce1e1030ae2639bcf"} Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.429967 4925 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ec09e32b-ba93-4d22-b975-2616c14ba9cd" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.429980 4925 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ec09e32b-ba93-4d22-b975-2616c14ba9cd" Jan 21 11:01:10 crc kubenswrapper[4925]: E0121 11:01:10.430510 4925 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.431086 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.432288 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.432858 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.433346 4925 status_manager.go:851] 
"Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.433728 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.433984 4925 status_manager.go:851] "Failed to get status for pod" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" pod="openshift-marketplace/community-operators-x5pnh" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-x5pnh\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.434252 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.434575 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.434824 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.435002 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.435161 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.435374 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.624438 4925 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p5std" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.624488 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p5std" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.838070 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d9qfn" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.838757 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d9qfn" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.984798 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d9qfn" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.985837 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.986597 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.987347 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.987773 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.988142 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.988702 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.989063 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.989418 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.989917 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.990239 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.990552 4925 status_manager.go:851] "Failed to get status for pod" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" pod="openshift-marketplace/community-operators-x5pnh" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-x5pnh\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.990771 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p5std" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.990971 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.991869 4925 status_manager.go:851] "Failed to get status for pod" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" pod="openshift-marketplace/community-operators-d9qfn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-d9qfn\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.992379 4925 status_manager.go:851] "Failed to get status for pod" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" pod="openshift-marketplace/redhat-marketplace-xq95p" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xq95p\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.993148 4925 status_manager.go:851] "Failed to get status for pod" podUID="3afd79f3-5455-427f-a278-62309cd643ec" pod="openshift-marketplace/redhat-operators-qt57g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-qt57g\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: 
I0121 11:01:10.993614 4925 status_manager.go:851] "Failed to get status for pod" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.993997 4925 status_manager.go:851] "Failed to get status for pod" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" pod="openshift-marketplace/community-operators-x5pnh" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-x5pnh\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.994363 4925 status_manager.go:851] "Failed to get status for pod" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" pod="openshift-marketplace/certified-operators-k9xnv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-k9xnv\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.994851 4925 status_manager.go:851] "Failed to get status for pod" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" pod="openshift-marketplace/redhat-marketplace-4w5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4w5bk\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.995144 4925 status_manager.go:851] "Failed to get status for pod" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" pod="openshift-marketplace/certified-operators-p5std" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-p5std\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.995614 4925 status_manager.go:851] "Failed to get status for pod" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" pod="openshift-authentication/oauth-openshift-558db77b4-vwhv9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-vwhv9\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.996132 4925 status_manager.go:851] "Failed to get status for pod" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" pod="openshift-console/downloads-7954f5f757-vw8cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-vw8cb\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.996491 4925 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 21 11:01:10 crc kubenswrapper[4925]: I0121 11:01:10.996960 4925 status_manager.go:851] "Failed to get status for pod" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" pod="openshift-marketplace/redhat-operators-5kqxm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5kqxm\": dial tcp 38.102.83.113:6443: connect: connection refused" Jan 
21 11:01:11 crc kubenswrapper[4925]: I0121 11:01:11.438531 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2365ca45c07c7bce80252fd61b8ee7a1c5bf0f3ed4af0b4cee6d2bc6f061dc96"} Jan 21 11:01:11 crc kubenswrapper[4925]: I0121 11:01:11.592602 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d9qfn" Jan 21 11:01:11 crc kubenswrapper[4925]: I0121 11:01:11.634456 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 11:01:11 crc kubenswrapper[4925]: I0121 11:01:11.635384 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 11:01:11 crc kubenswrapper[4925]: I0121 11:01:11.706168 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 11:01:11 crc kubenswrapper[4925]: I0121 11:01:11.707224 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 11:01:11 crc kubenswrapper[4925]: I0121 11:01:11.789480 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.022345 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.022519 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.118993 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.450338 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.450431 4925 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd" exitCode=1 Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.450497 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd"} Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.451701 4925 scope.go:117] "RemoveContainer" containerID="65ea678b360e84c806c2b313a901acf70deabedf3ca7898b414977be5d0b5fbd" Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.619843 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.637625 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 11:01:12 crc kubenswrapper[4925]: I0121 11:01:12.856030 4925 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/redhat-operators-qt57g" podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="registry-server" probeResult="failure" output=< Jan 21 11:01:12 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:01:12 crc kubenswrapper[4925]: > Jan 21 11:01:14 crc kubenswrapper[4925]: I0121 11:01:14.048628 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:01:14 crc kubenswrapper[4925]: I0121 11:01:14.049180 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:01:14 crc kubenswrapper[4925]: I0121 11:01:14.048662 4925 patch_prober.go:28] interesting pod/downloads-7954f5f757-vw8cb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Jan 21 11:01:14 crc kubenswrapper[4925]: I0121 11:01:14.049253 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vw8cb" podUID="890e3b6e-bd8d-438c-992b-508bb751bdca" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.28:8080/\": dial tcp 10.217.0.28:8080: connect: connection refused" Jan 21 11:01:14 crc kubenswrapper[4925]: I0121 11:01:14.690616 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 11:01:19 crc kubenswrapper[4925]: I0121 11:01:19.575095 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 11:01:20 crc kubenswrapper[4925]: I0121 11:01:20.603999 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 11:01:20 crc kubenswrapper[4925]: I0121 11:01:20.689460 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p5std" Jan 21 11:01:21 crc kubenswrapper[4925]: I0121 11:01:21.754762 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 11:01:21 crc kubenswrapper[4925]: I0121 11:01:21.802524 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 11:01:24 crc kubenswrapper[4925]: I0121 11:01:24.046148 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-vw8cb" Jan 21 11:01:27 crc kubenswrapper[4925]: I0121 11:01:27.915115 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9xnv" event={"ID":"4c65dfb5-99b9-4899-9a86-b9e05194e9a4","Type":"ContainerStarted","Data":"73ef3b1d7fd817b4e6e518b19757a67879eba8250d605677aae2c0c1ee285e98"} Jan 21 11:01:27 crc kubenswrapper[4925]: I0121 11:01:27.924532 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 21 11:01:27 crc kubenswrapper[4925]: I0121 11:01:27.924629 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c3a85ba38c869c9207151190045f0fdc5155ec4fdd655a70aacde6314d9553d3"} Jan 21 11:01:27 crc kubenswrapper[4925]: I0121 11:01:27.932334 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x5pnh" event={"ID":"970344f4-64f6-4ffc-9896-6dd169ca1553","Type":"ContainerStarted","Data":"5f070fb17f289564191231b954b0caba00f8d80c298463bdb3ed82121a031b60"} Jan 21 11:01:27 crc kubenswrapper[4925]: I0121 11:01:27.935725 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2c184ff3e795eaa7969a616c14ada155198e477553813fb0f88d94e203cc2a9e"} Jan 21 11:01:27 crc kubenswrapper[4925]: I0121 11:01:27.946019 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kqxm" event={"ID":"758a7d1b-c327-42ee-a585-efa49ec90d5e","Type":"ContainerStarted","Data":"e62ec8ed815c06740e3cc15a998b9e6d9f74706e7e12e4cbccf7b66b9d0351ea"} Jan 21 11:01:28 crc kubenswrapper[4925]: I0121 11:01:28.954819 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"6b1d33819ce5785122848e7af275813629e452f2954c2a9868cc4d77df7d46e4"} Jan 21 11:01:29 crc kubenswrapper[4925]: I0121 11:01:29.204425 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 21 11:01:29 crc kubenswrapper[4925]: I0121 11:01:29.262907 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 21 11:01:29 crc kubenswrapper[4925]: I0121 11:01:29.399125 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 11:01:29 crc kubenswrapper[4925]: I0121 11:01:29.463986 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 21 11:01:29 crc kubenswrapper[4925]: I0121 11:01:29.519208 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 21 11:01:29 crc kubenswrapper[4925]: I0121 11:01:29.755890 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x5pnh" Jan 21 11:01:29 crc kubenswrapper[4925]: I0121 11:01:29.755997 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x5pnh" Jan 21 11:01:29 crc kubenswrapper[4925]: I0121 11:01:29.818023 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.375496 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 
11:01:30.376469 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.450361 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.578727 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.600601 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.600665 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.604267 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.616882 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.620737 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.673973 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 11:01:30 crc kubenswrapper[4925]: I0121 11:01:30.803315 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x5pnh" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="registry-server" probeResult="failure" output=< Jan 21 11:01:30 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:01:30 crc kubenswrapper[4925]: > Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.062153 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.096084 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.212096 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.301503 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.315024 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.827988 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.828818 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.906131 4925 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 21 11:01:31 crc kubenswrapper[4925]: I0121 11:01:31.911952 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.179998 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.180077 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.185091 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.185692 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.186098 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.186295 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.190862 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.204759 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.219919 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.388296 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.484109 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.635178 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.638086 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.794810 4925 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.869963 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.939296 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.954278 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 21 11:01:32 crc kubenswrapper[4925]: I0121 11:01:32.998378 4925 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.014179 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.049992 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.115451 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.224539 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.240994 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5kqxm" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="registry-server" probeResult="failure" output=< Jan 21 11:01:33 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:01:33 crc kubenswrapper[4925]: > Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.315541 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.334653 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.343751 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.423454 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.436199 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.453025 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.489510 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.522434 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.607904 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.620715 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.711805 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.718272 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.720150 
4925 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.820871 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.840079 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 21 11:01:33 crc kubenswrapper[4925]: I0121 11:01:33.892348 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.082571 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.089598 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.126613 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.160622 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.226865 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.271147 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.314837 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.315213 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.410897 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.522240 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.585705 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.608904 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.638540 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.641201 4925 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.648998 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4w5bk" podStartSLOduration=35.796685255 podStartE2EDuration="2m44.648952528s" podCreationTimestamp="2026-01-21 10:58:50 +0000 UTC" firstStartedPulling="2026-01-21 
10:58:57.912268449 +0000 UTC m=+229.516160373" lastFinishedPulling="2026-01-21 11:01:06.764535722 +0000 UTC m=+358.368427646" observedRunningTime="2026-01-21 11:01:19.246865131 +0000 UTC m=+370.850757055" watchObservedRunningTime="2026-01-21 11:01:34.648952528 +0000 UTC m=+386.252844532" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.649462 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xq95p" podStartSLOduration=35.842257107 podStartE2EDuration="2m44.649454955s" podCreationTimestamp="2026-01-21 10:58:50 +0000 UTC" firstStartedPulling="2026-01-21 10:58:57.903957273 +0000 UTC m=+229.507849207" lastFinishedPulling="2026-01-21 11:01:06.711155131 +0000 UTC m=+358.315047055" observedRunningTime="2026-01-21 11:01:19.156736705 +0000 UTC m=+370.760628669" watchObservedRunningTime="2026-01-21 11:01:34.649454955 +0000 UTC m=+386.253346889" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.649598 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5kqxm" podStartSLOduration=22.063480565 podStartE2EDuration="2m44.64959474s" podCreationTimestamp="2026-01-21 10:58:50 +0000 UTC" firstStartedPulling="2026-01-21 10:58:57.892351628 +0000 UTC m=+229.496243562" lastFinishedPulling="2026-01-21 11:01:20.478465803 +0000 UTC m=+372.082357737" observedRunningTime="2026-01-21 11:01:28.022436235 +0000 UTC m=+379.626328159" watchObservedRunningTime="2026-01-21 11:01:34.64959474 +0000 UTC m=+386.253486674" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.658723 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d9qfn" podStartSLOduration=37.659227998 podStartE2EDuration="2m47.658696803s" podCreationTimestamp="2026-01-21 10:58:47 +0000 UTC" firstStartedPulling="2026-01-21 10:58:56.537788915 +0000 UTC m=+228.141680849" lastFinishedPulling="2026-01-21 11:01:06.53725772 +0000 UTC m=+358.141149654" observedRunningTime="2026-01-21 11:01:19.137799762 +0000 UTC m=+370.741691706" watchObservedRunningTime="2026-01-21 11:01:34.658696803 +0000 UTC m=+386.262588757" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.659713 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.661165 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qt57g" podStartSLOduration=34.69133039 podStartE2EDuration="2m44.661154725s" podCreationTimestamp="2026-01-21 10:58:50 +0000 UTC" firstStartedPulling="2026-01-21 10:58:56.795463112 +0000 UTC m=+228.399355046" lastFinishedPulling="2026-01-21 11:01:06.765287447 +0000 UTC m=+358.369179381" observedRunningTime="2026-01-21 11:01:19.175592794 +0000 UTC m=+370.779484748" watchObservedRunningTime="2026-01-21 11:01:34.661154725 +0000 UTC m=+386.265046659" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.662751 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.663317 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=38.663283206 podStartE2EDuration="38.663283206s" podCreationTimestamp="2026-01-21 11:00:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:01:19.607110597 +0000 UTC m=+371.211002531" watchObservedRunningTime="2026-01-21 11:01:34.663283206 +0000 UTC m=+386.267175140" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.664576 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x5pnh" podStartSLOduration=17.005483592 podStartE2EDuration="2m47.664568339s" podCreationTimestamp="2026-01-21 10:58:47 +0000 UTC" firstStartedPulling="2026-01-21 10:58:56.445888656 +0000 UTC m=+228.049780590" lastFinishedPulling="2026-01-21 11:01:27.104973413 +0000 UTC m=+378.708865337" observedRunningTime="2026-01-21 11:01:27.96233798 +0000 UTC m=+379.566229914" watchObservedRunningTime="2026-01-21 11:01:34.664568339 +0000 UTC m=+386.268460273" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.665135 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p5std" podStartSLOduration=36.068701686 podStartE2EDuration="2m47.665125197s" podCreationTimestamp="2026-01-21 10:58:47 +0000 UTC" firstStartedPulling="2026-01-21 10:58:55.164930664 +0000 UTC m=+226.768822608" lastFinishedPulling="2026-01-21 11:01:06.761354185 +0000 UTC m=+358.365246119" observedRunningTime="2026-01-21 11:01:19.312596853 +0000 UTC m=+370.916488787" watchObservedRunningTime="2026-01-21 11:01:34.665125197 +0000 UTC m=+386.269017121" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.667219 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k9xnv" podStartSLOduration=17.060152569 podStartE2EDuration="2m47.667209077s" podCreationTimestamp="2026-01-21 10:58:47 +0000 UTC" firstStartedPulling="2026-01-21 10:58:56.431145427 +0000 UTC m=+228.035037361" lastFinishedPulling="2026-01-21 11:01:27.038201935 +0000 UTC m=+378.642093869" observedRunningTime="2026-01-21 11:01:27.938425563 +0000 UTC m=+379.542317497" watchObservedRunningTime="2026-01-21 11:01:34.667209077 +0000 UTC m=+386.271101031" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.669191 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vwhv9"] Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.747367 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.865662 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.954097 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.964268 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.965156 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 21 11:01:34 crc kubenswrapper[4925]: I0121 11:01:34.966844 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.003922 4925 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-console"/"networking-console-plugin" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.014819 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.047766 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.068690 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.169983 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.288897 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.288933 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.349561 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.349639 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.445984 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.460749 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.511647 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" path="/var/lib/kubelet/pods/b4eed50b-ef22-4637-9aa1-d8528310aed1/volumes" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.609633 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.655957 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.825587 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.949770 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 21 11:01:35 crc kubenswrapper[4925]: I0121 11:01:35.949934 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.080121 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.171909 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 21 11:01:36 
crc kubenswrapper[4925]: I0121 11:01:36.203232 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.226757 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8330395569b73d5c1edc7cb9b55e0d3d61e9819d2819f351f9b52a61132f38be"} Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.279756 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.305421 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.348059 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.376179 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.387368 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.428062 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.520333 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.626324 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.632954 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.737187 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.802292 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.835635 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 21 11:01:36 crc kubenswrapper[4925]: I0121 11:01:36.889172 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.019677 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.061929 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.201866 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.249746 4925 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.290691 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.334354 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.350610 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.365349 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.410485 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.517538 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.520296 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.658072 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.687287 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.693211 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.764363 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.768682 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.775031 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.807584 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.831426 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.834970 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.847662 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.970886 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.976196 4925 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 21 11:01:37 crc kubenswrapper[4925]: I0121 11:01:37.995960 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.044884 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.128914 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.167606 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.210802 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.340683 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.413886 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.474439 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.534737 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.637556 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.648660 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.661941 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.685281 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.811299 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.845180 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.894388 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7445b77485-m7ckc"] Jan 21 11:01:38 crc kubenswrapper[4925]: E0121 11:01:38.894806 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.894826 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" Jan 21 11:01:38 crc kubenswrapper[4925]: E0121 11:01:38.894851 4925 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" containerName="installer" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.894859 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" containerName="installer" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.895005 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9358efee-87a3-49bf-a75c-a45dc2ac2987" containerName="installer" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.895026 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4eed50b-ef22-4637-9aa1-d8528310aed1" containerName="oauth-openshift" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.895821 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:38 crc kubenswrapper[4925]: I0121 11:01:38.921488 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7445b77485-m7ckc"] Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.050936 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051013 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051051 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-audit-dir\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051082 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-audit-policies\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051118 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-service-ca\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051152 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051191 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-login\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051557 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051657 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051710 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-session\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051737 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-router-certs\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051787 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.051817 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-error\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc 
kubenswrapper[4925]: I0121 11:01:39.051836 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqdsz\" (UniqueName: \"kubernetes.io/projected/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-kube-api-access-gqdsz\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.063735 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.063810 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.064058 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.065166 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.067616 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.069512 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.069821 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.070111 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.070499 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.070944 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.071273 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.071634 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.072687 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.078776 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.079284 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.080541 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.152932 4925 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153069 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153101 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-audit-dir\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153129 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-audit-policies\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153151 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-service-ca\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153174 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153204 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-login\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153236 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153263 4925 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153284 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-session\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153299 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-router-certs\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153323 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153340 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-error\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.153370 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqdsz\" (UniqueName: \"kubernetes.io/projected/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-kube-api-access-gqdsz\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.156688 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.157253 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-service-ca\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.158274 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-audit-dir\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.158600 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.158839 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-audit-policies\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.165637 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-session\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.165655 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-error\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.165734 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.165968 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-router-certs\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.166565 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.166896 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.181558 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.181897 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.182152 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.182275 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.189871 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-user-template-login\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.192310 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.194103 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqdsz\" (UniqueName: \"kubernetes.io/projected/e9b8abff-ca3c-47d7-9c5e-f6a23574e25d-kube-api-access-gqdsz\") pod \"oauth-openshift-7445b77485-m7ckc\" (UID: \"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d\") " pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.217971 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.342012 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.353188 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.405025 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.420386 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.450874 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.478142 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.486865 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.498453 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.574226 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.598924 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.603163 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.649185 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.680534 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.803232 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.941254 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.991910 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.994989 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 21 11:01:39 crc kubenswrapper[4925]: I0121 11:01:39.997739 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x5pnh" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.059621 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x5pnh" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.119472 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.122276 4925 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.168430 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.235682 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.248327 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.311320 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.345639 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.571121 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.611681 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.688749 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.819336 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.828215 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 21 11:01:40 crc kubenswrapper[4925]: I0121 11:01:40.863799 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.015506 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.034620 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.089969 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.157084 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.193246 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7445b77485-m7ckc"] Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.196351 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.262180 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" 
event={"ID":"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d","Type":"ContainerStarted","Data":"89f3574de110cdd4bced098c419238922549fab8d234e1ccfddf842ded8e9cab"} Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.270615 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.271234 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.277032 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.310056 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.364625 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.400647 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.411501 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.467987 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.558529 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.614855 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.615026 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.715314 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.737873 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.750334 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.770683 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.898547 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 21 11:01:41 crc kubenswrapper[4925]: I0121 11:01:41.974799 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.023556 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 
11:01:42.078087 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.098140 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.223662 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.224355 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.375016 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.376760 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.619217 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.642339 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.768750 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.810379 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.894576 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.931382 4925 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 21 11:01:42 crc kubenswrapper[4925]: I0121 11:01:42.963713 4925 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.081566 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.217592 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.240125 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.274748 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.284841 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5547876077b90c8678789491462f0aac7b66ff23ea3cea635029d8e1228bc5b1"} Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.285106 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.285242 4925 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ec09e32b-ba93-4d22-b975-2616c14ba9cd" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.285280 4925 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ec09e32b-ba93-4d22-b975-2616c14ba9cd" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.287453 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" event={"ID":"e9b8abff-ca3c-47d7-9c5e-f6a23574e25d","Type":"ContainerStarted","Data":"b3583a68c5d21c21f3119f79fd4203bd53a2195d8c047fdc84406332051b48af"} Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.288020 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.297738 4925 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.299985 4925 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec09e32b-ba93-4d22-b975-2616c14ba9cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2365ca45c07c7bce80252fd61b8ee7a1c5bf0f3ed4af0b4cee6d2bc6f061dc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T11:01:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1d33819ce5785122848e7af275813629e452f2954c2a9868cc4d77df7d46e4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T11:01:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c184ff3e795eaa7969a616c14ada155198e477553813fb0f88d94e203cc2a9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T11:01:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5547876077b90c8678789491462f0aac7b66ff23ea3cea635029d8e1228bc5b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T11:01:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8330395569b73d5c1edc7cb9b55e0d3d61e9819d2819f351f9b52a61132f38be\\\",\\\"image\\
\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T11:01:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": pods \"kube-apiserver-crc\" not found" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.312377 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.318830 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.331472 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.331856 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" podStartSLOduration=78.331843023 podStartE2EDuration="1m18.331843023s" podCreationTimestamp="2026-01-21 11:00:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:01:43.323187926 +0000 UTC m=+394.927079930" watchObservedRunningTime="2026-01-21 11:01:43.331843023 +0000 UTC m=+394.935734957" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.411075 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7445b77485-m7ckc" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.504480 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.590310 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 21 11:01:43 crc kubenswrapper[4925]: I0121 11:01:43.793676 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.002747 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.049607 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.101301 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.152040 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.152540 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.297799 4925 kubelet.go:1909] "Trying to delete pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ec09e32b-ba93-4d22-b975-2616c14ba9cd" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.297871 4925 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ec09e32b-ba93-4d22-b975-2616c14ba9cd" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.300649 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.495644 4925 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.560413 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.560500 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.566766 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.601488 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=1.6014612129999999 podStartE2EDuration="1.601461213s" podCreationTimestamp="2026-01-21 11:01:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:01:43.456657407 +0000 UTC m=+395.060549341" watchObservedRunningTime="2026-01-21 11:01:44.601461213 +0000 UTC m=+396.205353147" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.702528 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 21 11:01:44 crc kubenswrapper[4925]: I0121 11:01:44.988867 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 21 11:01:45 crc kubenswrapper[4925]: I0121 11:01:45.006583 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 21 11:01:45 crc kubenswrapper[4925]: I0121 11:01:45.172701 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 21 11:01:45 crc kubenswrapper[4925]: I0121 11:01:45.251049 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 21 11:01:45 crc kubenswrapper[4925]: I0121 11:01:45.331597 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:01:45 crc kubenswrapper[4925]: I0121 11:01:45.536245 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 21 11:01:45 crc kubenswrapper[4925]: I0121 11:01:45.942936 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 21 11:01:45 crc kubenswrapper[4925]: I0121 11:01:45.946503 4925 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-config-operator"/"kube-root-ca.crt" Jan 21 11:01:46 crc kubenswrapper[4925]: I0121 11:01:46.220861 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 21 11:01:59 crc kubenswrapper[4925]: I0121 11:01:59.567379 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 11:02:04 crc kubenswrapper[4925]: I0121 11:02:04.991759 4925 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 21 11:02:04 crc kubenswrapper[4925]: I0121 11:02:04.992820 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://bd324a95fef87895723cb01beb8c3aa59b0283ea7ea20fa43711478b91c060bf" gracePeriod=5 Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.547890 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.548866 4925 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="bd324a95fef87895723cb01beb8c3aa59b0283ea7ea20fa43711478b91c060bf" exitCode=137 Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.638534 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.638677 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.790542 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.790156 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.792629 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.792727 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.792789 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.792803 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.792871 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.792983 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.793239 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.793691 4925 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.793715 4925 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.793725 4925 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.793738 4925 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.804807 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:02:10 crc kubenswrapper[4925]: I0121 11:02:10.895874 4925 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:11 crc kubenswrapper[4925]: I0121 11:02:11.511307 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 21 11:02:11 crc kubenswrapper[4925]: I0121 11:02:11.512581 4925 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 21 11:02:11 crc kubenswrapper[4925]: I0121 11:02:11.525161 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 21 11:02:11 crc kubenswrapper[4925]: I0121 11:02:11.525218 4925 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="502cfe7c-4377-4748-ab5f-d667cff48aaa" Jan 21 11:02:11 crc kubenswrapper[4925]: I0121 11:02:11.530786 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 21 11:02:11 crc kubenswrapper[4925]: I0121 11:02:11.530865 4925 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="502cfe7c-4377-4748-ab5f-d667cff48aaa" Jan 21 11:02:11 crc kubenswrapper[4925]: I0121 11:02:11.558695 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 21 11:02:11 crc kubenswrapper[4925]: I0121 11:02:11.558863 4925 scope.go:117] "RemoveContainer" containerID="bd324a95fef87895723cb01beb8c3aa59b0283ea7ea20fa43711478b91c060bf" Jan 21 11:02:11 crc kubenswrapper[4925]: 
I0121 11:02:11.559046 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 11:02:15 crc kubenswrapper[4925]: I0121 11:02:15.188077 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx"] Jan 21 11:02:15 crc kubenswrapper[4925]: I0121 11:02:15.188898 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" podUID="4374182f-5a91-416c-a25c-c20b66d4fb68" containerName="controller-manager" containerID="cri-o://f3adec58f0c35fff173da56a96d0ee34c5238d306c503ff9c5742bfeeab48478" gracePeriod=30 Jan 21 11:02:15 crc kubenswrapper[4925]: I0121 11:02:15.286817 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk"] Jan 21 11:02:15 crc kubenswrapper[4925]: I0121 11:02:15.287236 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" podUID="2f99616a-3317-414e-a865-dc4753aed67a" containerName="route-controller-manager" containerID="cri-o://5e342e4189a05be7c352bf2724df379aeda6d25be0bc3498959fb07440e1bec5" gracePeriod=30 Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.710908 4925 generic.go:334] "Generic (PLEG): container finished" podID="2f99616a-3317-414e-a865-dc4753aed67a" containerID="5e342e4189a05be7c352bf2724df379aeda6d25be0bc3498959fb07440e1bec5" exitCode=0 Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.711038 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" event={"ID":"2f99616a-3317-414e-a865-dc4753aed67a","Type":"ContainerDied","Data":"5e342e4189a05be7c352bf2724df379aeda6d25be0bc3498959fb07440e1bec5"} Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.715089 4925 generic.go:334] "Generic (PLEG): container finished" podID="4374182f-5a91-416c-a25c-c20b66d4fb68" containerID="f3adec58f0c35fff173da56a96d0ee34c5238d306c503ff9c5742bfeeab48478" exitCode=0 Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.715171 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" event={"ID":"4374182f-5a91-416c-a25c-c20b66d4fb68","Type":"ContainerDied","Data":"f3adec58f0c35fff173da56a96d0ee34c5238d306c503ff9c5742bfeeab48478"} Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.766513 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.907469 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-client-ca\") pod \"4374182f-5a91-416c-a25c-c20b66d4fb68\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.907812 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-config\") pod \"4374182f-5a91-416c-a25c-c20b66d4fb68\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.907860 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mc6jf\" (UniqueName: \"kubernetes.io/projected/4374182f-5a91-416c-a25c-c20b66d4fb68-kube-api-access-mc6jf\") pod \"4374182f-5a91-416c-a25c-c20b66d4fb68\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.908180 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-proxy-ca-bundles\") pod \"4374182f-5a91-416c-a25c-c20b66d4fb68\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.908226 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4374182f-5a91-416c-a25c-c20b66d4fb68-serving-cert\") pod \"4374182f-5a91-416c-a25c-c20b66d4fb68\" (UID: \"4374182f-5a91-416c-a25c-c20b66d4fb68\") " Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.909626 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-client-ca" (OuterVolumeSpecName: "client-ca") pod "4374182f-5a91-416c-a25c-c20b66d4fb68" (UID: "4374182f-5a91-416c-a25c-c20b66d4fb68"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.909660 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-config" (OuterVolumeSpecName: "config") pod "4374182f-5a91-416c-a25c-c20b66d4fb68" (UID: "4374182f-5a91-416c-a25c-c20b66d4fb68"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.911089 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4374182f-5a91-416c-a25c-c20b66d4fb68" (UID: "4374182f-5a91-416c-a25c-c20b66d4fb68"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.916519 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4374182f-5a91-416c-a25c-c20b66d4fb68-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4374182f-5a91-416c-a25c-c20b66d4fb68" (UID: "4374182f-5a91-416c-a25c-c20b66d4fb68"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:02:16 crc kubenswrapper[4925]: I0121 11:02:16.927799 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4374182f-5a91-416c-a25c-c20b66d4fb68-kube-api-access-mc6jf" (OuterVolumeSpecName: "kube-api-access-mc6jf") pod "4374182f-5a91-416c-a25c-c20b66d4fb68" (UID: "4374182f-5a91-416c-a25c-c20b66d4fb68"). InnerVolumeSpecName "kube-api-access-mc6jf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.011369 4925 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.011589 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4374182f-5a91-416c-a25c-c20b66d4fb68-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.011605 4925 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.011619 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4374182f-5a91-416c-a25c-c20b66d4fb68-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.011639 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mc6jf\" (UniqueName: \"kubernetes.io/projected/4374182f-5a91-416c-a25c-c20b66d4fb68-kube-api-access-mc6jf\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.077358 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.113378 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8kxh\" (UniqueName: \"kubernetes.io/projected/2f99616a-3317-414e-a865-dc4753aed67a-kube-api-access-b8kxh\") pod \"2f99616a-3317-414e-a865-dc4753aed67a\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.113534 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-client-ca\") pod \"2f99616a-3317-414e-a865-dc4753aed67a\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.113575 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-config\") pod \"2f99616a-3317-414e-a865-dc4753aed67a\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.113622 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f99616a-3317-414e-a865-dc4753aed67a-serving-cert\") pod \"2f99616a-3317-414e-a865-dc4753aed67a\" (UID: \"2f99616a-3317-414e-a865-dc4753aed67a\") " Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.114894 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-client-ca" (OuterVolumeSpecName: "client-ca") pod "2f99616a-3317-414e-a865-dc4753aed67a" (UID: "2f99616a-3317-414e-a865-dc4753aed67a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.115084 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-config" (OuterVolumeSpecName: "config") pod "2f99616a-3317-414e-a865-dc4753aed67a" (UID: "2f99616a-3317-414e-a865-dc4753aed67a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.117633 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f99616a-3317-414e-a865-dc4753aed67a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2f99616a-3317-414e-a865-dc4753aed67a" (UID: "2f99616a-3317-414e-a865-dc4753aed67a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.117939 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f99616a-3317-414e-a865-dc4753aed67a-kube-api-access-b8kxh" (OuterVolumeSpecName: "kube-api-access-b8kxh") pod "2f99616a-3317-414e-a865-dc4753aed67a" (UID: "2f99616a-3317-414e-a865-dc4753aed67a"). InnerVolumeSpecName "kube-api-access-b8kxh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.215216 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f99616a-3317-414e-a865-dc4753aed67a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.215280 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8kxh\" (UniqueName: \"kubernetes.io/projected/2f99616a-3317-414e-a865-dc4753aed67a-kube-api-access-b8kxh\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.215297 4925 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.215308 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f99616a-3317-414e-a865-dc4753aed67a-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.708945 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2"] Jan 21 11:02:17 crc kubenswrapper[4925]: E0121 11:02:17.712695 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.712747 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 21 11:02:17 crc kubenswrapper[4925]: E0121 11:02:17.712765 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f99616a-3317-414e-a865-dc4753aed67a" containerName="route-controller-manager" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.712773 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f99616a-3317-414e-a865-dc4753aed67a" containerName="route-controller-manager" Jan 21 11:02:17 crc kubenswrapper[4925]: E0121 11:02:17.712822 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4374182f-5a91-416c-a25c-c20b66d4fb68" containerName="controller-manager" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.712831 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="4374182f-5a91-416c-a25c-c20b66d4fb68" containerName="controller-manager" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.713264 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="4374182f-5a91-416c-a25c-c20b66d4fb68" containerName="controller-manager" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.713297 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f99616a-3317-414e-a865-dc4753aed67a" containerName="route-controller-manager" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.713312 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.714635 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.724214 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-547bb84987-857cm"] Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.726469 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.730047 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.730554 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" event={"ID":"2f99616a-3317-414e-a865-dc4753aed67a","Type":"ContainerDied","Data":"dc66a7989f370e6ef9db018f695ffde67f5fb105b2a4915060f61d617f6deff4"} Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.730685 4925 scope.go:117] "RemoveContainer" containerID="5e342e4189a05be7c352bf2724df379aeda6d25be0bc3498959fb07440e1bec5" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.737413 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" event={"ID":"4374182f-5a91-416c-a25c-c20b66d4fb68","Type":"ContainerDied","Data":"3a733b47255c64902ad96d63efe576a56a99740c8d193a9783bd97854eb71147"} Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.737520 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.741562 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2"] Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.750617 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-547bb84987-857cm"] Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.768757 4925 scope.go:117] "RemoveContainer" containerID="f3adec58f0c35fff173da56a96d0ee34c5238d306c503ff9c5742bfeeab48478" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.813021 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx"] Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825461 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-config\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825534 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7e6a8d-dada-4429-9178-341eb46f944d-serving-cert\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825557 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-client-ca\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825580 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-proxy-ca-bundles\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825612 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-serving-cert\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825631 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-client-ca\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825661 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-config\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825703 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5gm6\" (UniqueName: \"kubernetes.io/projected/6d7e6a8d-dada-4429-9178-341eb46f944d-kube-api-access-j5gm6\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.825741 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7zxv\" (UniqueName: \"kubernetes.io/projected/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-kube-api-access-h7zxv\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.829322 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5d84fbf44f-2dtzx"] Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.833865 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk"] Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.837276 4925 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk"] Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.927066 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7e6a8d-dada-4429-9178-341eb46f944d-serving-cert\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.927126 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-client-ca\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.927173 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-proxy-ca-bundles\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.927254 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-serving-cert\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.929875 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-client-ca\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.929909 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-config\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.929760 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-proxy-ca-bundles\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.928741 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-client-ca\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.931051 4925 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-client-ca\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.931495 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5gm6\" (UniqueName: \"kubernetes.io/projected/6d7e6a8d-dada-4429-9178-341eb46f944d-kube-api-access-j5gm6\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.932158 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-config\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.932590 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7e6a8d-dada-4429-9178-341eb46f944d-serving-cert\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.933333 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-serving-cert\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.935624 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7zxv\" (UniqueName: \"kubernetes.io/projected/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-kube-api-access-h7zxv\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.935813 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-config\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.939095 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-config\") pod \"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.957209 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5gm6\" (UniqueName: \"kubernetes.io/projected/6d7e6a8d-dada-4429-9178-341eb46f944d-kube-api-access-j5gm6\") pod 
\"controller-manager-fdff8f5bc-8gkd2\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:17 crc kubenswrapper[4925]: I0121 11:02:17.959265 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7zxv\" (UniqueName: \"kubernetes.io/projected/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-kube-api-access-h7zxv\") pod \"route-controller-manager-547bb84987-857cm\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.023157 4925 patch_prober.go:28] interesting pod/route-controller-manager-64655f4f9f-27sjk container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.56:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.023242 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-64655f4f9f-27sjk" podUID="2f99616a-3317-414e-a865-dc4753aed67a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.56:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.095901 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.097142 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.343042 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-547bb84987-857cm"] Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.408644 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2"] Jan 21 11:02:18 crc kubenswrapper[4925]: W0121 11:02:18.411511 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d7e6a8d_dada_4429_9178_341eb46f944d.slice/crio-5e93c7c80a000bca466b76c2f8968a9d1f860753c42eceafa51f193a09e853fb WatchSource:0}: Error finding container 5e93c7c80a000bca466b76c2f8968a9d1f860753c42eceafa51f193a09e853fb: Status 404 returned error can't find the container with id 5e93c7c80a000bca466b76c2f8968a9d1f860753c42eceafa51f193a09e853fb Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.812318 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" event={"ID":"6d7e6a8d-dada-4429-9178-341eb46f944d","Type":"ContainerStarted","Data":"48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35"} Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.812498 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" event={"ID":"6d7e6a8d-dada-4429-9178-341eb46f944d","Type":"ContainerStarted","Data":"5e93c7c80a000bca466b76c2f8968a9d1f860753c42eceafa51f193a09e853fb"} Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.812721 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.816959 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" event={"ID":"d49539a3-0d7a-42bf-b2d9-b5f9e617650e","Type":"ContainerStarted","Data":"38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a"} Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.817010 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" event={"ID":"d49539a3-0d7a-42bf-b2d9-b5f9e617650e","Type":"ContainerStarted","Data":"1b5bc0a1cd93b72747a0b56243dbf46c60947c0dda0a4b2c1daf22a27d4ada6b"} Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.818516 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.828908 4925 patch_prober.go:28] interesting pod/controller-manager-fdff8f5bc-8gkd2 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.829032 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" podUID="6d7e6a8d-dada-4429-9178-341eb46f944d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 
10.217.0.62:8443: connect: connection refused" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.829365 4925 patch_prober.go:28] interesting pod/route-controller-manager-547bb84987-857cm container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": dial tcp 10.217.0.63:8443: connect: connection refused" start-of-body= Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.829387 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" podUID="d49539a3-0d7a-42bf-b2d9-b5f9e617650e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": dial tcp 10.217.0.63:8443: connect: connection refused" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.891868 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" podStartSLOduration=3.891846047 podStartE2EDuration="3.891846047s" podCreationTimestamp="2026-01-21 11:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:02:18.890496792 +0000 UTC m=+430.494388726" watchObservedRunningTime="2026-01-21 11:02:18.891846047 +0000 UTC m=+430.495737981" Jan 21 11:02:18 crc kubenswrapper[4925]: I0121 11:02:18.899256 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" podStartSLOduration=3.899219563 podStartE2EDuration="3.899219563s" podCreationTimestamp="2026-01-21 11:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:02:18.854807622 +0000 UTC m=+430.458699566" watchObservedRunningTime="2026-01-21 11:02:18.899219563 +0000 UTC m=+430.503111497" Jan 21 11:02:19 crc kubenswrapper[4925]: I0121 11:02:19.512216 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f99616a-3317-414e-a865-dc4753aed67a" path="/var/lib/kubelet/pods/2f99616a-3317-414e-a865-dc4753aed67a/volumes" Jan 21 11:02:19 crc kubenswrapper[4925]: I0121 11:02:19.513315 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4374182f-5a91-416c-a25c-c20b66d4fb68" path="/var/lib/kubelet/pods/4374182f-5a91-416c-a25c-c20b66d4fb68/volumes" Jan 21 11:02:19 crc kubenswrapper[4925]: I0121 11:02:19.835019 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:19 crc kubenswrapper[4925]: I0121 11:02:19.836160 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:19 crc kubenswrapper[4925]: I0121 11:02:19.941421 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:02:19 crc kubenswrapper[4925]: I0121 11:02:19.941524 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" 
podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:02:23 crc kubenswrapper[4925]: I0121 11:02:23.315529 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d9qfn"] Jan 21 11:02:23 crc kubenswrapper[4925]: I0121 11:02:23.315995 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d9qfn" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerName="registry-server" containerID="cri-o://821b3521e48a32b1abb1a1ca0d1d34c8825715a62265b0c3149cc9870c101546" gracePeriod=2 Jan 21 11:02:23 crc kubenswrapper[4925]: I0121 11:02:23.516005 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5bk"] Jan 21 11:02:23 crc kubenswrapper[4925]: I0121 11:02:23.516328 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4w5bk" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerName="registry-server" containerID="cri-o://6089f50fd53569747272a97e61a346771ac54fc9a3588b31da9d82eef9546228" gracePeriod=2 Jan 21 11:02:23 crc kubenswrapper[4925]: I0121 11:02:23.862742 4925 generic.go:334] "Generic (PLEG): container finished" podID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerID="6089f50fd53569747272a97e61a346771ac54fc9a3588b31da9d82eef9546228" exitCode=0 Jan 21 11:02:23 crc kubenswrapper[4925]: I0121 11:02:23.862854 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5bk" event={"ID":"f6d949bc-f771-4100-8afa-ff89f3da97d7","Type":"ContainerDied","Data":"6089f50fd53569747272a97e61a346771ac54fc9a3588b31da9d82eef9546228"} Jan 21 11:02:23 crc kubenswrapper[4925]: I0121 11:02:23.867602 4925 generic.go:334] "Generic (PLEG): container finished" podID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerID="821b3521e48a32b1abb1a1ca0d1d34c8825715a62265b0c3149cc9870c101546" exitCode=0 Jan 21 11:02:23 crc kubenswrapper[4925]: I0121 11:02:23.867684 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d9qfn" event={"ID":"c59d1347-a48d-4337-a8d1-2e5bef1f4535","Type":"ContainerDied","Data":"821b3521e48a32b1abb1a1ca0d1d34c8825715a62265b0c3149cc9870c101546"} Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.083761 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.212496 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sx98h\" (UniqueName: \"kubernetes.io/projected/f6d949bc-f771-4100-8afa-ff89f3da97d7-kube-api-access-sx98h\") pod \"f6d949bc-f771-4100-8afa-ff89f3da97d7\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.212656 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-catalog-content\") pod \"f6d949bc-f771-4100-8afa-ff89f3da97d7\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.212726 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-utilities\") pod \"f6d949bc-f771-4100-8afa-ff89f3da97d7\" (UID: \"f6d949bc-f771-4100-8afa-ff89f3da97d7\") " Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.214621 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-utilities" (OuterVolumeSpecName: "utilities") pod "f6d949bc-f771-4100-8afa-ff89f3da97d7" (UID: "f6d949bc-f771-4100-8afa-ff89f3da97d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.221848 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6d949bc-f771-4100-8afa-ff89f3da97d7-kube-api-access-sx98h" (OuterVolumeSpecName: "kube-api-access-sx98h") pod "f6d949bc-f771-4100-8afa-ff89f3da97d7" (UID: "f6d949bc-f771-4100-8afa-ff89f3da97d7"). InnerVolumeSpecName "kube-api-access-sx98h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.242488 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f6d949bc-f771-4100-8afa-ff89f3da97d7" (UID: "f6d949bc-f771-4100-8afa-ff89f3da97d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.315126 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.315195 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sx98h\" (UniqueName: \"kubernetes.io/projected/f6d949bc-f771-4100-8afa-ff89f3da97d7-kube-api-access-sx98h\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.315210 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6d949bc-f771-4100-8afa-ff89f3da97d7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.516722 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d9qfn" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.618353 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-catalog-content\") pod \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.618707 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45hql\" (UniqueName: \"kubernetes.io/projected/c59d1347-a48d-4337-a8d1-2e5bef1f4535-kube-api-access-45hql\") pod \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.618769 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-utilities\") pod \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\" (UID: \"c59d1347-a48d-4337-a8d1-2e5bef1f4535\") " Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.619916 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-utilities" (OuterVolumeSpecName: "utilities") pod "c59d1347-a48d-4337-a8d1-2e5bef1f4535" (UID: "c59d1347-a48d-4337-a8d1-2e5bef1f4535"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.627833 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c59d1347-a48d-4337-a8d1-2e5bef1f4535-kube-api-access-45hql" (OuterVolumeSpecName: "kube-api-access-45hql") pod "c59d1347-a48d-4337-a8d1-2e5bef1f4535" (UID: "c59d1347-a48d-4337-a8d1-2e5bef1f4535"). InnerVolumeSpecName "kube-api-access-45hql". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.684007 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c59d1347-a48d-4337-a8d1-2e5bef1f4535" (UID: "c59d1347-a48d-4337-a8d1-2e5bef1f4535"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.722124 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45hql\" (UniqueName: \"kubernetes.io/projected/c59d1347-a48d-4337-a8d1-2e5bef1f4535-kube-api-access-45hql\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.722206 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.722225 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59d1347-a48d-4337-a8d1-2e5bef1f4535-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.878282 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d9qfn" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.878276 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d9qfn" event={"ID":"c59d1347-a48d-4337-a8d1-2e5bef1f4535","Type":"ContainerDied","Data":"2c636e6bc4477ff1104784e408bb291fbf10d106e95dd2d8085f9b0ccb71cf70"} Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.878747 4925 scope.go:117] "RemoveContainer" containerID="821b3521e48a32b1abb1a1ca0d1d34c8825715a62265b0c3149cc9870c101546" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.885002 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5bk" event={"ID":"f6d949bc-f771-4100-8afa-ff89f3da97d7","Type":"ContainerDied","Data":"0fb9180355bdc02c44cf35bed9bfc48642ca7a4086b7af89dafb21ad84f1a897"} Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.885189 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4w5bk" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.904102 4925 scope.go:117] "RemoveContainer" containerID="20e7ea7a608ff732478ad8653d4077dbf0324eb00edd384ed14c0d56e13903ea" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.926580 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d9qfn"] Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.932772 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d9qfn"] Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.938655 4925 scope.go:117] "RemoveContainer" containerID="ee70e1ce9a091dc45869d21c6c727e4145bafd02e3b093d9e8d684d28fb4b05b" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.951459 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5bk"] Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.957175 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5bk"] Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.969800 4925 scope.go:117] "RemoveContainer" containerID="6089f50fd53569747272a97e61a346771ac54fc9a3588b31da9d82eef9546228" Jan 21 11:02:24 crc kubenswrapper[4925]: I0121 11:02:24.999504 4925 scope.go:117] "RemoveContainer" containerID="650f790578afacf55003bd3d670984f6027d7a772254f4d691fead59f209c71d" Jan 21 11:02:25 crc kubenswrapper[4925]: I0121 11:02:25.027248 4925 scope.go:117] "RemoveContainer" containerID="b9c3cc25c302118378be1508c24e8b928bf3a15a3c47f1e7a17185bc397809b9" Jan 21 11:02:25 crc kubenswrapper[4925]: I0121 11:02:25.512259 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" path="/var/lib/kubelet/pods/c59d1347-a48d-4337-a8d1-2e5bef1f4535/volumes" Jan 21 11:02:25 crc kubenswrapper[4925]: I0121 11:02:25.513021 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" path="/var/lib/kubelet/pods/f6d949bc-f771-4100-8afa-ff89f3da97d7/volumes" Jan 21 11:02:25 crc kubenswrapper[4925]: I0121 11:02:25.718312 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qt57g"] Jan 21 11:02:25 crc kubenswrapper[4925]: I0121 11:02:25.719299 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qt57g" 
podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="registry-server" containerID="cri-o://b01154a3e793f29856b682701b21a35617264abb4f2a0a8338a14f6f8729527d" gracePeriod=2 Jan 21 11:02:26 crc kubenswrapper[4925]: I0121 11:02:26.204484 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k9xnv"] Jan 21 11:02:26 crc kubenswrapper[4925]: I0121 11:02:26.204842 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k9xnv" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerName="registry-server" containerID="cri-o://73ef3b1d7fd817b4e6e518b19757a67879eba8250d605677aae2c0c1ee285e98" gracePeriod=2 Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.206819 4925 generic.go:334] "Generic (PLEG): container finished" podID="3afd79f3-5455-427f-a278-62309cd643ec" containerID="b01154a3e793f29856b682701b21a35617264abb4f2a0a8338a14f6f8729527d" exitCode=0 Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.206985 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qt57g" event={"ID":"3afd79f3-5455-427f-a278-62309cd643ec","Type":"ContainerDied","Data":"b01154a3e793f29856b682701b21a35617264abb4f2a0a8338a14f6f8729527d"} Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.213566 4925 generic.go:334] "Generic (PLEG): container finished" podID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerID="73ef3b1d7fd817b4e6e518b19757a67879eba8250d605677aae2c0c1ee285e98" exitCode=0 Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.213648 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9xnv" event={"ID":"4c65dfb5-99b9-4899-9a86-b9e05194e9a4","Type":"ContainerDied","Data":"73ef3b1d7fd817b4e6e518b19757a67879eba8250d605677aae2c0c1ee285e98"} Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.273935 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.389933 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-utilities\") pod \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.390111 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-catalog-content\") pod \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.390181 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg7xq\" (UniqueName: \"kubernetes.io/projected/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-kube-api-access-mg7xq\") pod \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\" (UID: \"4c65dfb5-99b9-4899-9a86-b9e05194e9a4\") " Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.391120 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-utilities" (OuterVolumeSpecName: "utilities") pod "4c65dfb5-99b9-4899-9a86-b9e05194e9a4" (UID: "4c65dfb5-99b9-4899-9a86-b9e05194e9a4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.399819 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-kube-api-access-mg7xq" (OuterVolumeSpecName: "kube-api-access-mg7xq") pod "4c65dfb5-99b9-4899-9a86-b9e05194e9a4" (UID: "4c65dfb5-99b9-4899-9a86-b9e05194e9a4"). InnerVolumeSpecName "kube-api-access-mg7xq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.402092 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.458425 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c65dfb5-99b9-4899-9a86-b9e05194e9a4" (UID: "4c65dfb5-99b9-4899-9a86-b9e05194e9a4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.493256 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg7xq\" (UniqueName: \"kubernetes.io/projected/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-kube-api-access-mg7xq\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.493463 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.493553 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c65dfb5-99b9-4899-9a86-b9e05194e9a4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.595283 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqj7h\" (UniqueName: \"kubernetes.io/projected/3afd79f3-5455-427f-a278-62309cd643ec-kube-api-access-gqj7h\") pod \"3afd79f3-5455-427f-a278-62309cd643ec\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.595422 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-catalog-content\") pod \"3afd79f3-5455-427f-a278-62309cd643ec\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.595551 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-utilities\") pod \"3afd79f3-5455-427f-a278-62309cd643ec\" (UID: \"3afd79f3-5455-427f-a278-62309cd643ec\") " Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.597003 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-utilities" (OuterVolumeSpecName: "utilities") pod "3afd79f3-5455-427f-a278-62309cd643ec" (UID: "3afd79f3-5455-427f-a278-62309cd643ec"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.600847 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3afd79f3-5455-427f-a278-62309cd643ec-kube-api-access-gqj7h" (OuterVolumeSpecName: "kube-api-access-gqj7h") pod "3afd79f3-5455-427f-a278-62309cd643ec" (UID: "3afd79f3-5455-427f-a278-62309cd643ec"). InnerVolumeSpecName "kube-api-access-gqj7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.699212 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqj7h\" (UniqueName: \"kubernetes.io/projected/3afd79f3-5455-427f-a278-62309cd643ec-kube-api-access-gqj7h\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.699454 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.728063 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3afd79f3-5455-427f-a278-62309cd643ec" (UID: "3afd79f3-5455-427f-a278-62309cd643ec"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:02:27 crc kubenswrapper[4925]: I0121 11:02:27.801899 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afd79f3-5455-427f-a278-62309cd643ec-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.224035 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9xnv" event={"ID":"4c65dfb5-99b9-4899-9a86-b9e05194e9a4","Type":"ContainerDied","Data":"2a279298cb460e86cfcb2eece5fd741bff561541dda8234c6406e1524c336761"} Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.224106 4925 scope.go:117] "RemoveContainer" containerID="73ef3b1d7fd817b4e6e518b19757a67879eba8250d605677aae2c0c1ee285e98" Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.224286 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k9xnv" Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.229133 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qt57g" event={"ID":"3afd79f3-5455-427f-a278-62309cd643ec","Type":"ContainerDied","Data":"8cb8b63d283f465b8988374ab0c8fd1eeb2243e0517fb739add08f9c99f90c21"} Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.229284 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qt57g" Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.255927 4925 scope.go:117] "RemoveContainer" containerID="e8f2ee9293a890398d2b0260c4de1be357909e18f6beff9c3675dccaafe5ab61" Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.269486 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k9xnv"] Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.274800 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k9xnv"] Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.324568 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qt57g"] Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.325419 4925 scope.go:117] "RemoveContainer" containerID="2bf163e510f0cfb687e17b13a7e2ef82f3047a4501fa7acc8fd440566cf3f6fd" Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.345161 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qt57g"] Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.383500 4925 scope.go:117] "RemoveContainer" containerID="b01154a3e793f29856b682701b21a35617264abb4f2a0a8338a14f6f8729527d" Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.409709 4925 scope.go:117] "RemoveContainer" containerID="325c6df9295d5f56b5ae11f2e589bf763983fa015eaefebd533498a549202626" Jan 21 11:02:28 crc kubenswrapper[4925]: I0121 11:02:28.431982 4925 scope.go:117] "RemoveContainer" containerID="b41f26a985e7845cb9ab9cd567ae885660102b037a4083bae748fcef9a70262b" Jan 21 11:02:29 crc kubenswrapper[4925]: I0121 11:02:29.512880 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3afd79f3-5455-427f-a278-62309cd643ec" path="/var/lib/kubelet/pods/3afd79f3-5455-427f-a278-62309cd643ec/volumes" Jan 21 11:02:29 crc kubenswrapper[4925]: I0121 11:02:29.514131 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" path="/var/lib/kubelet/pods/4c65dfb5-99b9-4899-9a86-b9e05194e9a4/volumes" Jan 21 11:02:49 crc kubenswrapper[4925]: I0121 11:02:49.941086 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:02:49 crc kubenswrapper[4925]: I0121 11:02:49.942067 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:02:55 crc kubenswrapper[4925]: I0121 11:02:55.218322 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2"] Jan 21 11:02:55 crc kubenswrapper[4925]: I0121 11:02:55.219370 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" podUID="6d7e6a8d-dada-4429-9178-341eb46f944d" containerName="controller-manager" containerID="cri-o://48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35" gracePeriod=30 Jan 21 11:02:55 crc kubenswrapper[4925]: I0121 11:02:55.252143 4925 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-547bb84987-857cm"] Jan 21 11:02:55 crc kubenswrapper[4925]: I0121 11:02:55.252939 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" podUID="d49539a3-0d7a-42bf-b2d9-b5f9e617650e" containerName="route-controller-manager" containerID="cri-o://38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a" gracePeriod=30 Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.243087 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.279093 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.347022 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-config\") pod \"6d7e6a8d-dada-4429-9178-341eb46f944d\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.347192 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7e6a8d-dada-4429-9178-341eb46f944d-serving-cert\") pod \"6d7e6a8d-dada-4429-9178-341eb46f944d\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.347254 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-proxy-ca-bundles\") pod \"6d7e6a8d-dada-4429-9178-341eb46f944d\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.347297 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-client-ca\") pod \"6d7e6a8d-dada-4429-9178-341eb46f944d\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.347372 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5gm6\" (UniqueName: \"kubernetes.io/projected/6d7e6a8d-dada-4429-9178-341eb46f944d-kube-api-access-j5gm6\") pod \"6d7e6a8d-dada-4429-9178-341eb46f944d\" (UID: \"6d7e6a8d-dada-4429-9178-341eb46f944d\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.348622 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-client-ca" (OuterVolumeSpecName: "client-ca") pod "6d7e6a8d-dada-4429-9178-341eb46f944d" (UID: "6d7e6a8d-dada-4429-9178-341eb46f944d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.348638 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6d7e6a8d-dada-4429-9178-341eb46f944d" (UID: "6d7e6a8d-dada-4429-9178-341eb46f944d"). 
InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.349100 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-config" (OuterVolumeSpecName: "config") pod "6d7e6a8d-dada-4429-9178-341eb46f944d" (UID: "6d7e6a8d-dada-4429-9178-341eb46f944d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.356860 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d7e6a8d-dada-4429-9178-341eb46f944d-kube-api-access-j5gm6" (OuterVolumeSpecName: "kube-api-access-j5gm6") pod "6d7e6a8d-dada-4429-9178-341eb46f944d" (UID: "6d7e6a8d-dada-4429-9178-341eb46f944d"). InnerVolumeSpecName "kube-api-access-j5gm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.358918 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d7e6a8d-dada-4429-9178-341eb46f944d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6d7e6a8d-dada-4429-9178-341eb46f944d" (UID: "6d7e6a8d-dada-4429-9178-341eb46f944d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.438807 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dfq6r"] Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439100 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerName="extract-content" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439115 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerName="extract-content" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439139 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="extract-content" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439146 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="extract-content" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439160 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerName="extract-utilities" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439166 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerName="extract-utilities" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439177 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d7e6a8d-dada-4429-9178-341eb46f944d" containerName="controller-manager" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439187 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d7e6a8d-dada-4429-9178-341eb46f944d" containerName="controller-manager" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439203 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d49539a3-0d7a-42bf-b2d9-b5f9e617650e" containerName="route-controller-manager" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439211 4925 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d49539a3-0d7a-42bf-b2d9-b5f9e617650e" containerName="route-controller-manager" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439221 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="extract-utilities" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439228 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="extract-utilities" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439239 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerName="extract-utilities" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439247 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerName="extract-utilities" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439262 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerName="extract-utilities" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439272 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerName="extract-utilities" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439284 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439293 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439304 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerName="extract-content" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439311 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerName="extract-content" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439318 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439325 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439332 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439339 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439350 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439356 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.439366 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerName="extract-content" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439372 4925 
state_mem.go:107] "Deleted CPUSet assignment" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerName="extract-content" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439492 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d49539a3-0d7a-42bf-b2d9-b5f9e617650e" containerName="route-controller-manager" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439513 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c65dfb5-99b9-4899-9a86-b9e05194e9a4" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439523 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c59d1347-a48d-4337-a8d1-2e5bef1f4535" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439535 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6d949bc-f771-4100-8afa-ff89f3da97d7" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439542 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3afd79f3-5455-427f-a278-62309cd643ec" containerName="registry-server" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.439550 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d7e6a8d-dada-4429-9178-341eb46f944d" containerName="controller-manager" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.440105 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.448404 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-config\") pod \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.448487 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7zxv\" (UniqueName: \"kubernetes.io/projected/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-kube-api-access-h7zxv\") pod \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.448528 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-serving-cert\") pod \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.448616 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-client-ca\") pod \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\" (UID: \"d49539a3-0d7a-42bf-b2d9-b5f9e617650e\") " Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.448947 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7e6a8d-dada-4429-9178-341eb46f944d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.448975 4925 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.448991 4925 
reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.449006 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5gm6\" (UniqueName: \"kubernetes.io/projected/6d7e6a8d-dada-4429-9178-341eb46f944d-kube-api-access-j5gm6\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.449020 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7e6a8d-dada-4429-9178-341eb46f944d-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.449696 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-config" (OuterVolumeSpecName: "config") pod "d49539a3-0d7a-42bf-b2d9-b5f9e617650e" (UID: "d49539a3-0d7a-42bf-b2d9-b5f9e617650e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.449724 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-client-ca" (OuterVolumeSpecName: "client-ca") pod "d49539a3-0d7a-42bf-b2d9-b5f9e617650e" (UID: "d49539a3-0d7a-42bf-b2d9-b5f9e617650e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.452954 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d49539a3-0d7a-42bf-b2d9-b5f9e617650e" (UID: "d49539a3-0d7a-42bf-b2d9-b5f9e617650e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.453609 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dfq6r"] Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.461170 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-kube-api-access-h7zxv" (OuterVolumeSpecName: "kube-api-access-h7zxv") pod "d49539a3-0d7a-42bf-b2d9-b5f9e617650e" (UID: "d49539a3-0d7a-42bf-b2d9-b5f9e617650e"). InnerVolumeSpecName "kube-api-access-h7zxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.482159 4925 generic.go:334] "Generic (PLEG): container finished" podID="6d7e6a8d-dada-4429-9178-341eb46f944d" containerID="48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35" exitCode=0 Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.482232 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.482228 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" event={"ID":"6d7e6a8d-dada-4429-9178-341eb46f944d","Type":"ContainerDied","Data":"48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35"} Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.482331 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2" event={"ID":"6d7e6a8d-dada-4429-9178-341eb46f944d","Type":"ContainerDied","Data":"5e93c7c80a000bca466b76c2f8968a9d1f860753c42eceafa51f193a09e853fb"} Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.482359 4925 scope.go:117] "RemoveContainer" containerID="48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.485760 4925 generic.go:334] "Generic (PLEG): container finished" podID="d49539a3-0d7a-42bf-b2d9-b5f9e617650e" containerID="38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a" exitCode=0 Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.485867 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" event={"ID":"d49539a3-0d7a-42bf-b2d9-b5f9e617650e","Type":"ContainerDied","Data":"38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a"} Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.485941 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" event={"ID":"d49539a3-0d7a-42bf-b2d9-b5f9e617650e","Type":"ContainerDied","Data":"1b5bc0a1cd93b72747a0b56243dbf46c60947c0dda0a4b2c1daf22a27d4ada6b"} Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.486045 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-547bb84987-857cm" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.513715 4925 scope.go:117] "RemoveContainer" containerID="48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 11:02:56.514444 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35\": container with ID starting with 48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35 not found: ID does not exist" containerID="48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.514516 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35"} err="failed to get container status \"48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35\": rpc error: code = NotFound desc = could not find container \"48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35\": container with ID starting with 48a63a3bc3548774448667ab6f90a20ce6603a0efcbaa6b537bf67362b0f8e35 not found: ID does not exist" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.514556 4925 scope.go:117] "RemoveContainer" containerID="38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.522491 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2"] Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.527136 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-fdff8f5bc-8gkd2"] Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.534119 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-547bb84987-857cm"] Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.540048 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-547bb84987-857cm"] Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.550112 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-registry-tls\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.550528 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjczd\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-kube-api-access-hjczd\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.550667 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-bound-sa-token\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.550781 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.552416 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-registry-certificates\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.552685 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.552858 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.552989 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-trusted-ca\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.553182 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.553275 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7zxv\" (UniqueName: \"kubernetes.io/projected/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-kube-api-access-h7zxv\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.553380 4925 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.553766 4925 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d49539a3-0d7a-42bf-b2d9-b5f9e617650e-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.560312 4925 scope.go:117] "RemoveContainer" containerID="38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a" Jan 21 11:02:56 crc kubenswrapper[4925]: E0121 
11:02:56.564775 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a\": container with ID starting with 38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a not found: ID does not exist" containerID="38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.564836 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a"} err="failed to get container status \"38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a\": rpc error: code = NotFound desc = could not find container \"38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a\": container with ID starting with 38590110c2f35e5c9e87d8da7c48d0f3cbebdbadacf9635f63dd18b98e39630a not found: ID does not exist" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.627475 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.655224 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-trusted-ca\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.655689 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-registry-tls\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.655842 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjczd\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-kube-api-access-hjczd\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.655962 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-bound-sa-token\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.656062 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-registry-certificates\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc 
kubenswrapper[4925]: I0121 11:02:56.656225 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.656374 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.657100 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.658237 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-trusted-ca\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.659693 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-registry-certificates\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.664047 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-registry-tls\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.664618 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.682465 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-bound-sa-token\") pod \"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.684095 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjczd\" (UniqueName: \"kubernetes.io/projected/191ca2d9-4f66-4f3e-b27d-bac48d1f150a-kube-api-access-hjczd\") pod 
\"image-registry-66df7c8f76-dfq6r\" (UID: \"191ca2d9-4f66-4f3e-b27d-bac48d1f150a\") " pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.802083 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.959617 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr"] Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.961415 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.965720 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.965923 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.966051 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.966166 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.970307 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.971706 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.986142 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58dccb64f7-7hwmg"] Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.987879 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.994592 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.994633 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.994826 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.994880 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.994935 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 21 11:02:56 crc kubenswrapper[4925]: I0121 11:02:56.995222 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.014189 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.014562 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr"] Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.022220 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58dccb64f7-7hwmg"] Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.062658 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-client-ca\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.062747 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2909711b-8468-4125-977d-c9c64668ed22-serving-cert\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.062827 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-config\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.062897 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-config\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc 
kubenswrapper[4925]: I0121 11:02:57.062930 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrw8p\" (UniqueName: \"kubernetes.io/projected/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-kube-api-access-qrw8p\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.063004 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-client-ca\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.063079 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp9ql\" (UniqueName: \"kubernetes.io/projected/2909711b-8468-4125-977d-c9c64668ed22-kube-api-access-cp9ql\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.063118 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-proxy-ca-bundles\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.063154 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-serving-cert\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164573 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-config\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164692 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-config\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164719 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-client-ca\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164741 4925 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrw8p\" (UniqueName: \"kubernetes.io/projected/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-kube-api-access-qrw8p\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164781 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp9ql\" (UniqueName: \"kubernetes.io/projected/2909711b-8468-4125-977d-c9c64668ed22-kube-api-access-cp9ql\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164807 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-proxy-ca-bundles\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164839 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-serving-cert\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164865 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-client-ca\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.164882 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2909711b-8468-4125-977d-c9c64668ed22-serving-cert\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.166177 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-client-ca\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.166290 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-config\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.166590 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-config\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.166991 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2909711b-8468-4125-977d-c9c64668ed22-proxy-ca-bundles\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.167324 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-client-ca\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.170544 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2909711b-8468-4125-977d-c9c64668ed22-serving-cert\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.171338 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-serving-cert\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.190766 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp9ql\" (UniqueName: \"kubernetes.io/projected/2909711b-8468-4125-977d-c9c64668ed22-kube-api-access-cp9ql\") pod \"controller-manager-58dccb64f7-7hwmg\" (UID: \"2909711b-8468-4125-977d-c9c64668ed22\") " pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.204650 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrw8p\" (UniqueName: \"kubernetes.io/projected/72af1efd-5fd6-4bc9-9e3b-eb85947c088f-kube-api-access-qrw8p\") pod \"route-controller-manager-654bb78b8-7zdlr\" (UID: \"72af1efd-5fd6-4bc9-9e3b-eb85947c088f\") " pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.290327 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.328491 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.404566 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dfq6r"] Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.559745 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d7e6a8d-dada-4429-9178-341eb46f944d" path="/var/lib/kubelet/pods/6d7e6a8d-dada-4429-9178-341eb46f944d/volumes" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.561339 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d49539a3-0d7a-42bf-b2d9-b5f9e617650e" path="/var/lib/kubelet/pods/d49539a3-0d7a-42bf-b2d9-b5f9e617650e/volumes" Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.562080 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" event={"ID":"191ca2d9-4f66-4f3e-b27d-bac48d1f150a","Type":"ContainerStarted","Data":"1e2bc58696f636d7fbb8b264718a1a51ca24c662c2b2d99acdd199ebb5cd292a"} Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.692138 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58dccb64f7-7hwmg"] Jan 21 11:02:57 crc kubenswrapper[4925]: W0121 11:02:57.700457 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2909711b_8468_4125_977d_c9c64668ed22.slice/crio-db188babed458fe9e7aff20e9d7d4a35075ca2660713e70b9eb68e9ae43ef3f9 WatchSource:0}: Error finding container db188babed458fe9e7aff20e9d7d4a35075ca2660713e70b9eb68e9ae43ef3f9: Status 404 returned error can't find the container with id db188babed458fe9e7aff20e9d7d4a35075ca2660713e70b9eb68e9ae43ef3f9 Jan 21 11:02:57 crc kubenswrapper[4925]: I0121 11:02:57.797275 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr"] Jan 21 11:02:57 crc kubenswrapper[4925]: W0121 11:02:57.802173 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72af1efd_5fd6_4bc9_9e3b_eb85947c088f.slice/crio-63996a0b51c9b3c678f5458a8c31b5983a1a61051808cd0e7df1f262dd7c2281 WatchSource:0}: Error finding container 63996a0b51c9b3c678f5458a8c31b5983a1a61051808cd0e7df1f262dd7c2281: Status 404 returned error can't find the container with id 63996a0b51c9b3c678f5458a8c31b5983a1a61051808cd0e7df1f262dd7c2281 Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.571936 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" event={"ID":"191ca2d9-4f66-4f3e-b27d-bac48d1f150a","Type":"ContainerStarted","Data":"a1340de5a9dbba985fb173dbd4c30fa433702d64949a829d01672f6bfbd60065"} Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.572526 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.574792 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" event={"ID":"2909711b-8468-4125-977d-c9c64668ed22","Type":"ContainerStarted","Data":"35bd5141978158b743e34c3900a9dcd01448381fcc676dfd15da017d04fb5916"} Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.574852 4925 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" event={"ID":"2909711b-8468-4125-977d-c9c64668ed22","Type":"ContainerStarted","Data":"db188babed458fe9e7aff20e9d7d4a35075ca2660713e70b9eb68e9ae43ef3f9"} Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.577774 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.580540 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" event={"ID":"72af1efd-5fd6-4bc9-9e3b-eb85947c088f","Type":"ContainerStarted","Data":"a029e34a998ec4b0abaa70edc612de65a4a4950a2e58ef4656eb74aac412997d"} Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.580605 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" event={"ID":"72af1efd-5fd6-4bc9-9e3b-eb85947c088f","Type":"ContainerStarted","Data":"63996a0b51c9b3c678f5458a8c31b5983a1a61051808cd0e7df1f262dd7c2281"} Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.580884 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.590592 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.607644 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" podStartSLOduration=2.6075904149999998 podStartE2EDuration="2.607590415s" podCreationTimestamp="2026-01-21 11:02:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:02:58.606420636 +0000 UTC m=+470.210312590" watchObservedRunningTime="2026-01-21 11:02:58.607590415 +0000 UTC m=+470.211482349" Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.610259 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.651668 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-654bb78b8-7zdlr" podStartSLOduration=3.6516294 podStartE2EDuration="3.6516294s" podCreationTimestamp="2026-01-21 11:02:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:02:58.646131897 +0000 UTC m=+470.250023841" watchObservedRunningTime="2026-01-21 11:02:58.6516294 +0000 UTC m=+470.255521334" Jan 21 11:02:58 crc kubenswrapper[4925]: I0121 11:02:58.691974 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58dccb64f7-7hwmg" podStartSLOduration=3.691946312 podStartE2EDuration="3.691946312s" podCreationTimestamp="2026-01-21 11:02:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:02:58.688243669 +0000 UTC m=+470.292135623" watchObservedRunningTime="2026-01-21 11:02:58.691946312 +0000 UTC 
m=+470.295838246" Jan 21 11:03:16 crc kubenswrapper[4925]: I0121 11:03:16.807898 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-dfq6r" Jan 21 11:03:16 crc kubenswrapper[4925]: I0121 11:03:16.890259 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-m7dl4"] Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.504433 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p5std"] Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.505670 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p5std" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerName="registry-server" containerID="cri-o://d2554eeb38c550cc14043ec631fb50424a86e45219dbda416ddad7f0b4960b6d" gracePeriod=30 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.514171 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x5pnh"] Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.514948 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x5pnh" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="registry-server" containerID="cri-o://5f070fb17f289564191231b954b0caba00f8d80c298463bdb3ed82121a031b60" gracePeriod=30 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.527864 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8ht27"] Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.528341 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" containerID="cri-o://ab374bbeec044a9763397c48f5c4e9f1abbe5b26276b693babac0512431d3c99" gracePeriod=30 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.536623 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xq95p"] Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.537114 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xq95p" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerName="registry-server" containerID="cri-o://fefc9e19d31158aa9cc6d75f03c81a7dbd0d658311eb3f50178ea9268553c983" gracePeriod=30 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.551086 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5kqxm"] Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.551585 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5kqxm" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="registry-server" containerID="cri-o://e62ec8ed815c06740e3cc15a998b9e6d9f74706e7e12e4cbccf7b66b9d0351ea" gracePeriod=30 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.555915 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kzv24"] Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.562507 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.577696 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kzv24"] Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.641243 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9821d9aa-a481-43fd-a938-98d978d17299-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.641537 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtvw7\" (UniqueName: \"kubernetes.io/projected/9821d9aa-a481-43fd-a938-98d978d17299-kube-api-access-qtvw7\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.641583 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9821d9aa-a481-43fd-a938-98d978d17299-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.739286 4925 generic.go:334] "Generic (PLEG): container finished" podID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerID="5f070fb17f289564191231b954b0caba00f8d80c298463bdb3ed82121a031b60" exitCode=0 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.739357 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x5pnh" event={"ID":"970344f4-64f6-4ffc-9896-6dd169ca1553","Type":"ContainerDied","Data":"5f070fb17f289564191231b954b0caba00f8d80c298463bdb3ed82121a031b60"} Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.742838 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtvw7\" (UniqueName: \"kubernetes.io/projected/9821d9aa-a481-43fd-a938-98d978d17299-kube-api-access-qtvw7\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.743350 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9821d9aa-a481-43fd-a938-98d978d17299-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.743518 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9821d9aa-a481-43fd-a938-98d978d17299-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc 
kubenswrapper[4925]: I0121 11:03:18.745842 4925 generic.go:334] "Generic (PLEG): container finished" podID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerID="e62ec8ed815c06740e3cc15a998b9e6d9f74706e7e12e4cbccf7b66b9d0351ea" exitCode=0 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.745933 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kqxm" event={"ID":"758a7d1b-c327-42ee-a585-efa49ec90d5e","Type":"ContainerDied","Data":"e62ec8ed815c06740e3cc15a998b9e6d9f74706e7e12e4cbccf7b66b9d0351ea"} Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.746285 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9821d9aa-a481-43fd-a938-98d978d17299-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.749869 4925 generic.go:334] "Generic (PLEG): container finished" podID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerID="ab374bbeec044a9763397c48f5c4e9f1abbe5b26276b693babac0512431d3c99" exitCode=0 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.749992 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" event={"ID":"68968bee-6187-43fa-bad4-ab1eb83e9c68","Type":"ContainerDied","Data":"ab374bbeec044a9763397c48f5c4e9f1abbe5b26276b693babac0512431d3c99"} Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.752534 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9821d9aa-a481-43fd-a938-98d978d17299-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.757075 4925 generic.go:334] "Generic (PLEG): container finished" podID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerID="fefc9e19d31158aa9cc6d75f03c81a7dbd0d658311eb3f50178ea9268553c983" exitCode=0 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.757217 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xq95p" event={"ID":"e4de47a6-b14d-4651-8568-49845b60ee7e","Type":"ContainerDied","Data":"fefc9e19d31158aa9cc6d75f03c81a7dbd0d658311eb3f50178ea9268553c983"} Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.763563 4925 generic.go:334] "Generic (PLEG): container finished" podID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerID="d2554eeb38c550cc14043ec631fb50424a86e45219dbda416ddad7f0b4960b6d" exitCode=0 Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.763638 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5std" event={"ID":"88c0c83d-a22b-4150-9572-ee68fb5f1e81","Type":"ContainerDied","Data":"d2554eeb38c550cc14043ec631fb50424a86e45219dbda416ddad7f0b4960b6d"} Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.771662 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtvw7\" (UniqueName: \"kubernetes.io/projected/9821d9aa-a481-43fd-a938-98d978d17299-kube-api-access-qtvw7\") pod \"marketplace-operator-79b997595-kzv24\" (UID: \"9821d9aa-a481-43fd-a938-98d978d17299\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:18 crc kubenswrapper[4925]: I0121 11:03:18.887553 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.207372 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.354549 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6b8h\" (UniqueName: \"kubernetes.io/projected/e4de47a6-b14d-4651-8568-49845b60ee7e-kube-api-access-f6b8h\") pod \"e4de47a6-b14d-4651-8568-49845b60ee7e\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.354671 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-catalog-content\") pod \"e4de47a6-b14d-4651-8568-49845b60ee7e\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.354758 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-utilities\") pod \"e4de47a6-b14d-4651-8568-49845b60ee7e\" (UID: \"e4de47a6-b14d-4651-8568-49845b60ee7e\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.358928 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-utilities" (OuterVolumeSpecName: "utilities") pod "e4de47a6-b14d-4651-8568-49845b60ee7e" (UID: "e4de47a6-b14d-4651-8568-49845b60ee7e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.363670 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4de47a6-b14d-4651-8568-49845b60ee7e-kube-api-access-f6b8h" (OuterVolumeSpecName: "kube-api-access-f6b8h") pod "e4de47a6-b14d-4651-8568-49845b60ee7e" (UID: "e4de47a6-b14d-4651-8568-49845b60ee7e"). InnerVolumeSpecName "kube-api-access-f6b8h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.391852 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e4de47a6-b14d-4651-8568-49845b60ee7e" (UID: "e4de47a6-b14d-4651-8568-49845b60ee7e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.456603 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.456701 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4de47a6-b14d-4651-8568-49845b60ee7e-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.456719 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6b8h\" (UniqueName: \"kubernetes.io/projected/e4de47a6-b14d-4651-8568-49845b60ee7e-kube-api-access-f6b8h\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.545914 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x5pnh" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.554827 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.582412 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.596151 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p5std" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663271 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqs9d\" (UniqueName: \"kubernetes.io/projected/68968bee-6187-43fa-bad4-ab1eb83e9c68-kube-api-access-jqs9d\") pod \"68968bee-6187-43fa-bad4-ab1eb83e9c68\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663334 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-utilities\") pod \"970344f4-64f6-4ffc-9896-6dd169ca1553\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663375 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-catalog-content\") pod \"758a7d1b-c327-42ee-a585-efa49ec90d5e\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663494 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-utilities\") pod \"758a7d1b-c327-42ee-a585-efa49ec90d5e\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663528 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kkp5\" (UniqueName: \"kubernetes.io/projected/970344f4-64f6-4ffc-9896-6dd169ca1553-kube-api-access-2kkp5\") pod \"970344f4-64f6-4ffc-9896-6dd169ca1553\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663561 4925 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-catalog-content\") pod \"970344f4-64f6-4ffc-9896-6dd169ca1553\" (UID: \"970344f4-64f6-4ffc-9896-6dd169ca1553\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663597 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9csw\" (UniqueName: \"kubernetes.io/projected/758a7d1b-c327-42ee-a585-efa49ec90d5e-kube-api-access-f9csw\") pod \"758a7d1b-c327-42ee-a585-efa49ec90d5e\" (UID: \"758a7d1b-c327-42ee-a585-efa49ec90d5e\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663639 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-trusted-ca\") pod \"68968bee-6187-43fa-bad4-ab1eb83e9c68\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.663694 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-operator-metrics\") pod \"68968bee-6187-43fa-bad4-ab1eb83e9c68\" (UID: \"68968bee-6187-43fa-bad4-ab1eb83e9c68\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.664540 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-utilities" (OuterVolumeSpecName: "utilities") pod "970344f4-64f6-4ffc-9896-6dd169ca1553" (UID: "970344f4-64f6-4ffc-9896-6dd169ca1553"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.665213 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-utilities" (OuterVolumeSpecName: "utilities") pod "758a7d1b-c327-42ee-a585-efa49ec90d5e" (UID: "758a7d1b-c327-42ee-a585-efa49ec90d5e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.666142 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "68968bee-6187-43fa-bad4-ab1eb83e9c68" (UID: "68968bee-6187-43fa-bad4-ab1eb83e9c68"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.684351 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/970344f4-64f6-4ffc-9896-6dd169ca1553-kube-api-access-2kkp5" (OuterVolumeSpecName: "kube-api-access-2kkp5") pod "970344f4-64f6-4ffc-9896-6dd169ca1553" (UID: "970344f4-64f6-4ffc-9896-6dd169ca1553"). InnerVolumeSpecName "kube-api-access-2kkp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.684643 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/758a7d1b-c327-42ee-a585-efa49ec90d5e-kube-api-access-f9csw" (OuterVolumeSpecName: "kube-api-access-f9csw") pod "758a7d1b-c327-42ee-a585-efa49ec90d5e" (UID: "758a7d1b-c327-42ee-a585-efa49ec90d5e"). 
InnerVolumeSpecName "kube-api-access-f9csw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.687236 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "68968bee-6187-43fa-bad4-ab1eb83e9c68" (UID: "68968bee-6187-43fa-bad4-ab1eb83e9c68"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.687850 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68968bee-6187-43fa-bad4-ab1eb83e9c68-kube-api-access-jqs9d" (OuterVolumeSpecName: "kube-api-access-jqs9d") pod "68968bee-6187-43fa-bad4-ab1eb83e9c68" (UID: "68968bee-6187-43fa-bad4-ab1eb83e9c68"). InnerVolumeSpecName "kube-api-access-jqs9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.707607 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kzv24"] Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.749843 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "970344f4-64f6-4ffc-9896-6dd169ca1553" (UID: "970344f4-64f6-4ffc-9896-6dd169ca1553"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767164 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xz8gh\" (UniqueName: \"kubernetes.io/projected/88c0c83d-a22b-4150-9572-ee68fb5f1e81-kube-api-access-xz8gh\") pod \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767274 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-utilities\") pod \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767412 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-catalog-content\") pod \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\" (UID: \"88c0c83d-a22b-4150-9572-ee68fb5f1e81\") " Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767831 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767861 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kkp5\" (UniqueName: \"kubernetes.io/projected/970344f4-64f6-4ffc-9896-6dd169ca1553-kube-api-access-2kkp5\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767887 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 
21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767901 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9csw\" (UniqueName: \"kubernetes.io/projected/758a7d1b-c327-42ee-a585-efa49ec90d5e-kube-api-access-f9csw\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767919 4925 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767932 4925 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68968bee-6187-43fa-bad4-ab1eb83e9c68-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767948 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqs9d\" (UniqueName: \"kubernetes.io/projected/68968bee-6187-43fa-bad4-ab1eb83e9c68-kube-api-access-jqs9d\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.767962 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/970344f4-64f6-4ffc-9896-6dd169ca1553-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.768466 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-utilities" (OuterVolumeSpecName: "utilities") pod "88c0c83d-a22b-4150-9572-ee68fb5f1e81" (UID: "88c0c83d-a22b-4150-9572-ee68fb5f1e81"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.772749 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88c0c83d-a22b-4150-9572-ee68fb5f1e81-kube-api-access-xz8gh" (OuterVolumeSpecName: "kube-api-access-xz8gh") pod "88c0c83d-a22b-4150-9572-ee68fb5f1e81" (UID: "88c0c83d-a22b-4150-9572-ee68fb5f1e81"). InnerVolumeSpecName "kube-api-access-xz8gh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.778610 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kqxm" event={"ID":"758a7d1b-c327-42ee-a585-efa49ec90d5e","Type":"ContainerDied","Data":"691a901442f536659c6b46bf664301ab71e3e3501f7571a84fe19db65d78fda7"} Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.779013 4925 scope.go:117] "RemoveContainer" containerID="e62ec8ed815c06740e3cc15a998b9e6d9f74706e7e12e4cbccf7b66b9d0351ea" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.778664 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5kqxm" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.781541 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" event={"ID":"68968bee-6187-43fa-bad4-ab1eb83e9c68","Type":"ContainerDied","Data":"3143d70223ea42ba179d1f984da70b5f5bec19ad66eb2c76882bbbf89c41a2f3"} Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.781639 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8ht27" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.785937 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xq95p" event={"ID":"e4de47a6-b14d-4651-8568-49845b60ee7e","Type":"ContainerDied","Data":"cd53f36b196321d5416ce99b634fc8f51b3a64f0ca259cee2905f112c4236ac0"} Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.786085 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xq95p" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.798151 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p5std" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.798968 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5std" event={"ID":"88c0c83d-a22b-4150-9572-ee68fb5f1e81","Type":"ContainerDied","Data":"dc2ed219a7a86aff66e395787fa688a802fec68ad690222fdd6262af5df03e65"} Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.801166 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" event={"ID":"9821d9aa-a481-43fd-a938-98d978d17299","Type":"ContainerStarted","Data":"0ad62bc0029666c04c08257fc3b30abd19990cd378aaecfe1a0b42ea93c0df55"} Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.807668 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x5pnh" event={"ID":"970344f4-64f6-4ffc-9896-6dd169ca1553","Type":"ContainerDied","Data":"91d710456a200dd01636e65640434f883794bbb59a0ad135f9417e16807457bb"} Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.807783 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x5pnh" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.825491 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88c0c83d-a22b-4150-9572-ee68fb5f1e81" (UID: "88c0c83d-a22b-4150-9572-ee68fb5f1e81"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.825918 4925 scope.go:117] "RemoveContainer" containerID="73e7e2ef08f80ead24699fb3d441128622d7fa05fb978ad51233a689cbca3352" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.839924 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "758a7d1b-c327-42ee-a585-efa49ec90d5e" (UID: "758a7d1b-c327-42ee-a585-efa49ec90d5e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.850996 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xq95p"] Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.860762 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xq95p"] Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.867635 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8ht27"] Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.869257 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xz8gh\" (UniqueName: \"kubernetes.io/projected/88c0c83d-a22b-4150-9572-ee68fb5f1e81-kube-api-access-xz8gh\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.869546 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.869653 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/758a7d1b-c327-42ee-a585-efa49ec90d5e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.869734 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c0c83d-a22b-4150-9572-ee68fb5f1e81-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.875725 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8ht27"] Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.878541 4925 scope.go:117] "RemoveContainer" containerID="48b8d61046399f6c90694e5f036dd2aa5506bc62234cde66dacccfc84034744e" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.884849 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x5pnh"] Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.888907 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x5pnh"] Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.902873 4925 scope.go:117] "RemoveContainer" containerID="ab374bbeec044a9763397c48f5c4e9f1abbe5b26276b693babac0512431d3c99" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.934229 4925 scope.go:117] "RemoveContainer" containerID="fefc9e19d31158aa9cc6d75f03c81a7dbd0d658311eb3f50178ea9268553c983" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.941489 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.941627 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.941711 4925 
kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.943943 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7d15779caa6e5b388f79a4466fbe1abe55140d18037403d8c0435912eed61b60"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.944045 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://7d15779caa6e5b388f79a4466fbe1abe55140d18037403d8c0435912eed61b60" gracePeriod=600 Jan 21 11:03:19 crc kubenswrapper[4925]: I0121 11:03:19.972782 4925 scope.go:117] "RemoveContainer" containerID="7e00837a1ae03655737bb4fb656f706083284f752840680f548fb3ef5eeae620" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.007655 4925 scope.go:117] "RemoveContainer" containerID="4d7f83e0fb63c60edcd5c14b60f38f9f558e2a90165e87a3d1a82bbfa24aa6e7" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.034802 4925 scope.go:117] "RemoveContainer" containerID="d2554eeb38c550cc14043ec631fb50424a86e45219dbda416ddad7f0b4960b6d" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.054990 4925 scope.go:117] "RemoveContainer" containerID="f012b32d2fc38a6091db0decd59a9c22bc6502a45cd0bb1fc9ee8e6edf68507b" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.082311 4925 scope.go:117] "RemoveContainer" containerID="0400ff4f00e12f3d471ed1ada23e8de1c582adedd143632df981968299002603" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.108068 4925 scope.go:117] "RemoveContainer" containerID="5f070fb17f289564191231b954b0caba00f8d80c298463bdb3ed82121a031b60" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.133888 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5kqxm"] Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.148880 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5kqxm"] Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.149963 4925 scope.go:117] "RemoveContainer" containerID="728d8fa74c94bb1befa031d409529d33781debb2fb0064707127fd558087063c" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.155329 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p5std"] Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.168042 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p5std"] Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.174794 4925 scope.go:117] "RemoveContainer" containerID="3b6fcb43863d0e64c50aaf0ab48a15d1c7203266b4cf5f7dd929eb6431949616" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.839676 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" event={"ID":"9821d9aa-a481-43fd-a938-98d978d17299","Type":"ContainerStarted","Data":"fa8955b0a60eebace417fc26208b6659a389a02b39ebd978ac76ebc743452be2"} Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.840342 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.845707 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="7d15779caa6e5b388f79a4466fbe1abe55140d18037403d8c0435912eed61b60" exitCode=0 Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.845818 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"7d15779caa6e5b388f79a4466fbe1abe55140d18037403d8c0435912eed61b60"} Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.846024 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"8ecb481e8ef1d0b6466c51999dd109a4671270510611a3058cedaf5fd5398994"} Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.846068 4925 scope.go:117] "RemoveContainer" containerID="e9517109bed244681851c5e081a5e888fe430467bd86aa2a6e102f22bb41b603" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.851612 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" Jan 21 11:03:20 crc kubenswrapper[4925]: I0121 11:03:20.872153 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-kzv24" podStartSLOduration=2.872127357 podStartE2EDuration="2.872127357s" podCreationTimestamp="2026-01-21 11:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:03:20.862458216 +0000 UTC m=+492.466350160" watchObservedRunningTime="2026-01-21 11:03:20.872127357 +0000 UTC m=+492.476019291" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.123276 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mldwz"] Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124176 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124196 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124243 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="extract-content" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124251 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="extract-content" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124264 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerName="extract-utilities" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124273 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerName="extract-utilities" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124284 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="extract-utilities" Jan 21 11:03:21 crc 
kubenswrapper[4925]: I0121 11:03:21.124291 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="extract-utilities" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124300 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="extract-utilities" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124306 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="extract-utilities" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124317 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerName="extract-utilities" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124323 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerName="extract-utilities" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124332 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124340 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124352 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerName="extract-content" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124359 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerName="extract-content" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124370 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124379 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124386 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124408 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124417 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerName="extract-content" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124427 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerName="extract-content" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124437 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="extract-content" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124444 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="extract-content" Jan 21 11:03:21 crc kubenswrapper[4925]: E0121 11:03:21.124453 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" 
containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124459 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124591 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124611 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124621 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124629 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" containerName="registry-server" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.124638 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" containerName="marketplace-operator" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.125554 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.128039 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.145494 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mldwz"] Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.290547 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c49d0579-4622-43ef-a28d-7cbf66ce5998-catalog-content\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.290668 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn5kl\" (UniqueName: \"kubernetes.io/projected/c49d0579-4622-43ef-a28d-7cbf66ce5998-kube-api-access-fn5kl\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.290730 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c49d0579-4622-43ef-a28d-7cbf66ce5998-utilities\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.393353 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c49d0579-4622-43ef-a28d-7cbf66ce5998-catalog-content\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.392761 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c49d0579-4622-43ef-a28d-7cbf66ce5998-catalog-content\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.393497 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn5kl\" (UniqueName: \"kubernetes.io/projected/c49d0579-4622-43ef-a28d-7cbf66ce5998-kube-api-access-fn5kl\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.393982 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c49d0579-4622-43ef-a28d-7cbf66ce5998-utilities\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.394382 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c49d0579-4622-43ef-a28d-7cbf66ce5998-utilities\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.425374 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn5kl\" (UniqueName: \"kubernetes.io/projected/c49d0579-4622-43ef-a28d-7cbf66ce5998-kube-api-access-fn5kl\") pod \"redhat-operators-mldwz\" (UID: \"c49d0579-4622-43ef-a28d-7cbf66ce5998\") " pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.457836 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.552523 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68968bee-6187-43fa-bad4-ab1eb83e9c68" path="/var/lib/kubelet/pods/68968bee-6187-43fa-bad4-ab1eb83e9c68/volumes" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.553285 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="758a7d1b-c327-42ee-a585-efa49ec90d5e" path="/var/lib/kubelet/pods/758a7d1b-c327-42ee-a585-efa49ec90d5e/volumes" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.554633 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88c0c83d-a22b-4150-9572-ee68fb5f1e81" path="/var/lib/kubelet/pods/88c0c83d-a22b-4150-9572-ee68fb5f1e81/volumes" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.556842 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="970344f4-64f6-4ffc-9896-6dd169ca1553" path="/var/lib/kubelet/pods/970344f4-64f6-4ffc-9896-6dd169ca1553/volumes" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.558155 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4de47a6-b14d-4651-8568-49845b60ee7e" path="/var/lib/kubelet/pods/e4de47a6-b14d-4651-8568-49845b60ee7e/volumes" Jan 21 11:03:21 crc kubenswrapper[4925]: I0121 11:03:21.956605 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mldwz"] Jan 21 11:03:21 crc kubenswrapper[4925]: W0121 11:03:21.974056 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc49d0579_4622_43ef_a28d_7cbf66ce5998.slice/crio-4f62b0eb1895ecbe26c33fad33de8a3565d94d70c52223526879e04e8a269f40 WatchSource:0}: Error finding container 4f62b0eb1895ecbe26c33fad33de8a3565d94d70c52223526879e04e8a269f40: Status 404 returned error can't find the container with id 4f62b0eb1895ecbe26c33fad33de8a3565d94d70c52223526879e04e8a269f40 Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.117671 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xbt62"] Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.121505 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.125083 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.135445 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xbt62"] Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.209457 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19b1508e-f4b4-420f-abc7-d2c922cea0fc-utilities\") pod \"community-operators-xbt62\" (UID: \"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.209562 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19b1508e-f4b4-420f-abc7-d2c922cea0fc-catalog-content\") pod \"community-operators-xbt62\" (UID: \"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.209840 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbpcj\" (UniqueName: \"kubernetes.io/projected/19b1508e-f4b4-420f-abc7-d2c922cea0fc-kube-api-access-vbpcj\") pod \"community-operators-xbt62\" (UID: \"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.311277 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19b1508e-f4b4-420f-abc7-d2c922cea0fc-utilities\") pod \"community-operators-xbt62\" (UID: \"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.311383 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19b1508e-f4b4-420f-abc7-d2c922cea0fc-catalog-content\") pod \"community-operators-xbt62\" (UID: \"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.311451 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbpcj\" (UniqueName: \"kubernetes.io/projected/19b1508e-f4b4-420f-abc7-d2c922cea0fc-kube-api-access-vbpcj\") pod \"community-operators-xbt62\" (UID: \"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.311957 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19b1508e-f4b4-420f-abc7-d2c922cea0fc-utilities\") pod \"community-operators-xbt62\" (UID: \"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.312197 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19b1508e-f4b4-420f-abc7-d2c922cea0fc-catalog-content\") pod \"community-operators-xbt62\" (UID: 
\"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.334057 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbpcj\" (UniqueName: \"kubernetes.io/projected/19b1508e-f4b4-420f-abc7-d2c922cea0fc-kube-api-access-vbpcj\") pod \"community-operators-xbt62\" (UID: \"19b1508e-f4b4-420f-abc7-d2c922cea0fc\") " pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.445949 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.877430 4925 generic.go:334] "Generic (PLEG): container finished" podID="c49d0579-4622-43ef-a28d-7cbf66ce5998" containerID="aa7cbb8e2903d763647d1eb4f7ab2edfa3de7b37fc5f94105639b447be55000a" exitCode=0 Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.877917 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mldwz" event={"ID":"c49d0579-4622-43ef-a28d-7cbf66ce5998","Type":"ContainerDied","Data":"aa7cbb8e2903d763647d1eb4f7ab2edfa3de7b37fc5f94105639b447be55000a"} Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.878491 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mldwz" event={"ID":"c49d0579-4622-43ef-a28d-7cbf66ce5998","Type":"ContainerStarted","Data":"4f62b0eb1895ecbe26c33fad33de8a3565d94d70c52223526879e04e8a269f40"} Jan 21 11:03:22 crc kubenswrapper[4925]: I0121 11:03:22.912142 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xbt62"] Jan 21 11:03:22 crc kubenswrapper[4925]: W0121 11:03:22.921814 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19b1508e_f4b4_420f_abc7_d2c922cea0fc.slice/crio-afa47332292298ee6026711603b5b6a5b3f7d9db41ebb1848a3f6516c2a87037 WatchSource:0}: Error finding container afa47332292298ee6026711603b5b6a5b3f7d9db41ebb1848a3f6516c2a87037: Status 404 returned error can't find the container with id afa47332292298ee6026711603b5b6a5b3f7d9db41ebb1848a3f6516c2a87037 Jan 21 11:03:23 crc kubenswrapper[4925]: I0121 11:03:23.885303 4925 generic.go:334] "Generic (PLEG): container finished" podID="19b1508e-f4b4-420f-abc7-d2c922cea0fc" containerID="13582cca90c2c88e1b9b0368ee71336567b5d0c2acc3f9ac0c3256e13ae4d779" exitCode=0 Jan 21 11:03:23 crc kubenswrapper[4925]: I0121 11:03:23.885369 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbt62" event={"ID":"19b1508e-f4b4-420f-abc7-d2c922cea0fc","Type":"ContainerDied","Data":"13582cca90c2c88e1b9b0368ee71336567b5d0c2acc3f9ac0c3256e13ae4d779"} Jan 21 11:03:23 crc kubenswrapper[4925]: I0121 11:03:23.885429 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbt62" event={"ID":"19b1508e-f4b4-420f-abc7-d2c922cea0fc","Type":"ContainerStarted","Data":"afa47332292298ee6026711603b5b6a5b3f7d9db41ebb1848a3f6516c2a87037"} Jan 21 11:03:23 crc kubenswrapper[4925]: I0121 11:03:23.922846 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xwrpb"] Jan 21 11:03:23 crc kubenswrapper[4925]: I0121 11:03:23.924633 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:23 crc kubenswrapper[4925]: I0121 11:03:23.929377 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 21 11:03:23 crc kubenswrapper[4925]: I0121 11:03:23.931415 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xwrpb"] Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.138878 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsfhg\" (UniqueName: \"kubernetes.io/projected/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-kube-api-access-wsfhg\") pod \"certified-operators-xwrpb\" (UID: \"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.139002 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-utilities\") pod \"certified-operators-xwrpb\" (UID: \"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.139071 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-catalog-content\") pod \"certified-operators-xwrpb\" (UID: \"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.240855 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsfhg\" (UniqueName: \"kubernetes.io/projected/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-kube-api-access-wsfhg\") pod \"certified-operators-xwrpb\" (UID: \"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.240945 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-utilities\") pod \"certified-operators-xwrpb\" (UID: \"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.241010 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-catalog-content\") pod \"certified-operators-xwrpb\" (UID: \"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.241816 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-catalog-content\") pod \"certified-operators-xwrpb\" (UID: \"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.242369 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-utilities\") pod \"certified-operators-xwrpb\" (UID: 
\"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.271024 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsfhg\" (UniqueName: \"kubernetes.io/projected/c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426-kube-api-access-wsfhg\") pod \"certified-operators-xwrpb\" (UID: \"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426\") " pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.373239 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.524707 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bbvfx"] Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.534149 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.537567 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.537607 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbvfx"] Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.651161 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d658610f-6e84-446d-9d81-e4e4198a6102-utilities\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.651261 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26kkv\" (UniqueName: \"kubernetes.io/projected/d658610f-6e84-446d-9d81-e4e4198a6102-kube-api-access-26kkv\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.651315 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d658610f-6e84-446d-9d81-e4e4198a6102-catalog-content\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.753050 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d658610f-6e84-446d-9d81-e4e4198a6102-utilities\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.753128 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26kkv\" (UniqueName: \"kubernetes.io/projected/d658610f-6e84-446d-9d81-e4e4198a6102-kube-api-access-26kkv\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.753159 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d658610f-6e84-446d-9d81-e4e4198a6102-catalog-content\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.753878 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d658610f-6e84-446d-9d81-e4e4198a6102-utilities\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.753900 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d658610f-6e84-446d-9d81-e4e4198a6102-catalog-content\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.778599 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26kkv\" (UniqueName: \"kubernetes.io/projected/d658610f-6e84-446d-9d81-e4e4198a6102-kube-api-access-26kkv\") pod \"redhat-marketplace-bbvfx\" (UID: \"d658610f-6e84-446d-9d81-e4e4198a6102\") " pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.864752 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.868405 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xwrpb"] Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.899531 4925 generic.go:334] "Generic (PLEG): container finished" podID="c49d0579-4622-43ef-a28d-7cbf66ce5998" containerID="a535a997f5e4b78437207894e00a8bd02fdadc0dc8f5decc66a121d036e67c3f" exitCode=0 Jan 21 11:03:24 crc kubenswrapper[4925]: I0121 11:03:24.899617 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mldwz" event={"ID":"c49d0579-4622-43ef-a28d-7cbf66ce5998","Type":"ContainerDied","Data":"a535a997f5e4b78437207894e00a8bd02fdadc0dc8f5decc66a121d036e67c3f"} Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.398215 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbvfx"] Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.908852 4925 generic.go:334] "Generic (PLEG): container finished" podID="c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426" containerID="fb64a2234742bca07e7b9c6e9fe0fe0d96a5ad803d0559c2c094e9a0e896a1a0" exitCode=0 Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.908948 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xwrpb" event={"ID":"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426","Type":"ContainerDied","Data":"fb64a2234742bca07e7b9c6e9fe0fe0d96a5ad803d0559c2c094e9a0e896a1a0"} Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.909433 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xwrpb" event={"ID":"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426","Type":"ContainerStarted","Data":"b172bfc384811f3ae2b4d9d212443923a1c266001db70648824b8f9c57a361c6"} Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.917333 4925 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mldwz" event={"ID":"c49d0579-4622-43ef-a28d-7cbf66ce5998","Type":"ContainerStarted","Data":"8c68e383a32bbb18d7a91ee088ab4226cb568d76d84a46d2ea225fc21680d443"} Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.920804 4925 generic.go:334] "Generic (PLEG): container finished" podID="19b1508e-f4b4-420f-abc7-d2c922cea0fc" containerID="b2823ed6edff922247848c887b713e0c501190ca353b4dd1bf1bc9ca85022ac5" exitCode=0 Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.920927 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbt62" event={"ID":"19b1508e-f4b4-420f-abc7-d2c922cea0fc","Type":"ContainerDied","Data":"b2823ed6edff922247848c887b713e0c501190ca353b4dd1bf1bc9ca85022ac5"} Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.922727 4925 generic.go:334] "Generic (PLEG): container finished" podID="d658610f-6e84-446d-9d81-e4e4198a6102" containerID="1310b2af629e1682b04ad97bc9ac7515635f568e160d59af805f4dc8ea99aa15" exitCode=0 Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.922771 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbvfx" event={"ID":"d658610f-6e84-446d-9d81-e4e4198a6102","Type":"ContainerDied","Data":"1310b2af629e1682b04ad97bc9ac7515635f568e160d59af805f4dc8ea99aa15"} Jan 21 11:03:25 crc kubenswrapper[4925]: I0121 11:03:25.922797 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbvfx" event={"ID":"d658610f-6e84-446d-9d81-e4e4198a6102","Type":"ContainerStarted","Data":"0af41a66ae64ab15b06eb12b074bf14c86efec3cc58ead25eaa5dedf2c3d9aef"} Jan 21 11:03:26 crc kubenswrapper[4925]: I0121 11:03:26.009164 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mldwz" podStartSLOduration=2.520313684 podStartE2EDuration="5.009135678s" podCreationTimestamp="2026-01-21 11:03:21 +0000 UTC" firstStartedPulling="2026-01-21 11:03:22.881760643 +0000 UTC m=+494.485652577" lastFinishedPulling="2026-01-21 11:03:25.370582637 +0000 UTC m=+496.974474571" observedRunningTime="2026-01-21 11:03:26.005072983 +0000 UTC m=+497.608964937" watchObservedRunningTime="2026-01-21 11:03:26.009135678 +0000 UTC m=+497.613027612" Jan 21 11:03:26 crc kubenswrapper[4925]: I0121 11:03:26.931995 4925 generic.go:334] "Generic (PLEG): container finished" podID="d658610f-6e84-446d-9d81-e4e4198a6102" containerID="a807fc83ca32a743430cee1a35aa8da3507fb36bb0398422b4e9cac9eeffdc78" exitCode=0 Jan 21 11:03:26 crc kubenswrapper[4925]: I0121 11:03:26.932071 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbvfx" event={"ID":"d658610f-6e84-446d-9d81-e4e4198a6102","Type":"ContainerDied","Data":"a807fc83ca32a743430cee1a35aa8da3507fb36bb0398422b4e9cac9eeffdc78"} Jan 21 11:03:26 crc kubenswrapper[4925]: I0121 11:03:26.946897 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xwrpb" event={"ID":"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426","Type":"ContainerStarted","Data":"f49697249422a159cd13b5bfb8d5655a75b684a4f2f7d5209f35e15ee619abad"} Jan 21 11:03:26 crc kubenswrapper[4925]: I0121 11:03:26.951752 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbt62" 
event={"ID":"19b1508e-f4b4-420f-abc7-d2c922cea0fc","Type":"ContainerStarted","Data":"0166ebea15f2e9ce99c8788bd893446113cb320edb561bd92011b513956e065a"} Jan 21 11:03:26 crc kubenswrapper[4925]: I0121 11:03:26.992752 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xbt62" podStartSLOduration=2.583234262 podStartE2EDuration="4.992723354s" podCreationTimestamp="2026-01-21 11:03:22 +0000 UTC" firstStartedPulling="2026-01-21 11:03:23.888688246 +0000 UTC m=+495.492580180" lastFinishedPulling="2026-01-21 11:03:26.298177348 +0000 UTC m=+497.902069272" observedRunningTime="2026-01-21 11:03:26.990536292 +0000 UTC m=+498.594428236" watchObservedRunningTime="2026-01-21 11:03:26.992723354 +0000 UTC m=+498.596615288" Jan 21 11:03:27 crc kubenswrapper[4925]: I0121 11:03:27.962346 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbvfx" event={"ID":"d658610f-6e84-446d-9d81-e4e4198a6102","Type":"ContainerStarted","Data":"a09dea5a3b231e91a5193b499c2070b24483765187d87ef81de98bec8906c69c"} Jan 21 11:03:27 crc kubenswrapper[4925]: I0121 11:03:27.968502 4925 generic.go:334] "Generic (PLEG): container finished" podID="c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426" containerID="f49697249422a159cd13b5bfb8d5655a75b684a4f2f7d5209f35e15ee619abad" exitCode=0 Jan 21 11:03:27 crc kubenswrapper[4925]: I0121 11:03:27.969596 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xwrpb" event={"ID":"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426","Type":"ContainerDied","Data":"f49697249422a159cd13b5bfb8d5655a75b684a4f2f7d5209f35e15ee619abad"} Jan 21 11:03:27 crc kubenswrapper[4925]: I0121 11:03:27.992916 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bbvfx" podStartSLOduration=2.300423623 podStartE2EDuration="3.992886972s" podCreationTimestamp="2026-01-21 11:03:24 +0000 UTC" firstStartedPulling="2026-01-21 11:03:25.923802098 +0000 UTC m=+497.527694032" lastFinishedPulling="2026-01-21 11:03:27.616265447 +0000 UTC m=+499.220157381" observedRunningTime="2026-01-21 11:03:27.986179398 +0000 UTC m=+499.590071322" watchObservedRunningTime="2026-01-21 11:03:27.992886972 +0000 UTC m=+499.596778906" Jan 21 11:03:29 crc kubenswrapper[4925]: I0121 11:03:29.984843 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xwrpb" event={"ID":"c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426","Type":"ContainerStarted","Data":"8e9d063da75049702aaec4d7db37b1a3b7fe54ef709a9fd97fc83d656200b537"} Jan 21 11:03:30 crc kubenswrapper[4925]: I0121 11:03:30.008172 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xwrpb" podStartSLOduration=4.496087507 podStartE2EDuration="7.008147624s" podCreationTimestamp="2026-01-21 11:03:23 +0000 UTC" firstStartedPulling="2026-01-21 11:03:25.911218609 +0000 UTC m=+497.515110543" lastFinishedPulling="2026-01-21 11:03:28.423278726 +0000 UTC m=+500.027170660" observedRunningTime="2026-01-21 11:03:30.005080142 +0000 UTC m=+501.608972086" watchObservedRunningTime="2026-01-21 11:03:30.008147624 +0000 UTC m=+501.612039558" Jan 21 11:03:31 crc kubenswrapper[4925]: I0121 11:03:31.458893 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:31 crc kubenswrapper[4925]: I0121 11:03:31.461507 4925 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:32 crc kubenswrapper[4925]: I0121 11:03:32.447218 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:32 crc kubenswrapper[4925]: I0121 11:03:32.447786 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:32 crc kubenswrapper[4925]: I0121 11:03:32.496093 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:32 crc kubenswrapper[4925]: I0121 11:03:32.520955 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mldwz" podUID="c49d0579-4622-43ef-a28d-7cbf66ce5998" containerName="registry-server" probeResult="failure" output=< Jan 21 11:03:32 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:03:32 crc kubenswrapper[4925]: > Jan 21 11:03:33 crc kubenswrapper[4925]: I0121 11:03:33.051721 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xbt62" Jan 21 11:03:34 crc kubenswrapper[4925]: I0121 11:03:34.374359 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:34 crc kubenswrapper[4925]: I0121 11:03:34.374473 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:34 crc kubenswrapper[4925]: I0121 11:03:34.427611 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:34 crc kubenswrapper[4925]: I0121 11:03:34.866363 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:34 crc kubenswrapper[4925]: I0121 11:03:34.866546 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:34 crc kubenswrapper[4925]: I0121 11:03:34.917878 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:35 crc kubenswrapper[4925]: I0121 11:03:35.062901 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bbvfx" Jan 21 11:03:35 crc kubenswrapper[4925]: I0121 11:03:35.067630 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xwrpb" Jan 21 11:03:41 crc kubenswrapper[4925]: I0121 11:03:41.512581 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:41 crc kubenswrapper[4925]: I0121 11:03:41.567475 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mldwz" Jan 21 11:03:41 crc kubenswrapper[4925]: I0121 11:03:41.958267 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" podUID="0770d392-cbe7-4049-aa81-46d3892bc4a9" containerName="registry" containerID="cri-o://1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf" gracePeriod=30 Jan 21 11:03:42 crc 
kubenswrapper[4925]: I0121 11:03:42.884096 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.967246 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0770d392-cbe7-4049-aa81-46d3892bc4a9-ca-trust-extracted\") pod \"0770d392-cbe7-4049-aa81-46d3892bc4a9\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.967366 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-bound-sa-token\") pod \"0770d392-cbe7-4049-aa81-46d3892bc4a9\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.967670 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"0770d392-cbe7-4049-aa81-46d3892bc4a9\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.967744 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8hlp\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-kube-api-access-m8hlp\") pod \"0770d392-cbe7-4049-aa81-46d3892bc4a9\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.967779 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-certificates\") pod \"0770d392-cbe7-4049-aa81-46d3892bc4a9\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.967819 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-trusted-ca\") pod \"0770d392-cbe7-4049-aa81-46d3892bc4a9\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.967941 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0770d392-cbe7-4049-aa81-46d3892bc4a9-installation-pull-secrets\") pod \"0770d392-cbe7-4049-aa81-46d3892bc4a9\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.967989 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-tls\") pod \"0770d392-cbe7-4049-aa81-46d3892bc4a9\" (UID: \"0770d392-cbe7-4049-aa81-46d3892bc4a9\") " Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.969828 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "0770d392-cbe7-4049-aa81-46d3892bc4a9" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.969981 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "0770d392-cbe7-4049-aa81-46d3892bc4a9" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.979127 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0770d392-cbe7-4049-aa81-46d3892bc4a9-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "0770d392-cbe7-4049-aa81-46d3892bc4a9" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.979177 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "0770d392-cbe7-4049-aa81-46d3892bc4a9" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.979969 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-kube-api-access-m8hlp" (OuterVolumeSpecName: "kube-api-access-m8hlp") pod "0770d392-cbe7-4049-aa81-46d3892bc4a9" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9"). InnerVolumeSpecName "kube-api-access-m8hlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.983931 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "0770d392-cbe7-4049-aa81-46d3892bc4a9" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.985712 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "0770d392-cbe7-4049-aa81-46d3892bc4a9" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:03:42 crc kubenswrapper[4925]: I0121 11:03:42.988725 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0770d392-cbe7-4049-aa81-46d3892bc4a9-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "0770d392-cbe7-4049-aa81-46d3892bc4a9" (UID: "0770d392-cbe7-4049-aa81-46d3892bc4a9"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.070124 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8hlp\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-kube-api-access-m8hlp\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.070216 4925 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.070280 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0770d392-cbe7-4049-aa81-46d3892bc4a9-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.070301 4925 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0770d392-cbe7-4049-aa81-46d3892bc4a9-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.070314 4925 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.070325 4925 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0770d392-cbe7-4049-aa81-46d3892bc4a9-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.070364 4925 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0770d392-cbe7-4049-aa81-46d3892bc4a9-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.072750 4925 generic.go:334] "Generic (PLEG): container finished" podID="0770d392-cbe7-4049-aa81-46d3892bc4a9" containerID="1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf" exitCode=0 Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.072822 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" event={"ID":"0770d392-cbe7-4049-aa81-46d3892bc4a9","Type":"ContainerDied","Data":"1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf"} Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.072846 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.072884 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-m7dl4" event={"ID":"0770d392-cbe7-4049-aa81-46d3892bc4a9","Type":"ContainerDied","Data":"04bbc8544d504fd9a331192d92ca8ab5db1ff8c08af71035057172db10f28a16"} Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.072912 4925 scope.go:117] "RemoveContainer" containerID="1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.103068 4925 scope.go:117] "RemoveContainer" containerID="1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf" Jan 21 11:03:43 crc kubenswrapper[4925]: E0121 11:03:43.104208 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf\": container with ID starting with 1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf not found: ID does not exist" containerID="1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.104303 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf"} err="failed to get container status \"1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf\": rpc error: code = NotFound desc = could not find container \"1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf\": container with ID starting with 1ef1c1a8fdac5402c990cb76a0804d1bf88c38d87f70d9f1d79c5e6af9357cdf not found: ID does not exist" Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.123206 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-m7dl4"] Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.127486 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-m7dl4"] Jan 21 11:03:43 crc kubenswrapper[4925]: I0121 11:03:43.512496 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0770d392-cbe7-4049-aa81-46d3892bc4a9" path="/var/lib/kubelet/pods/0770d392-cbe7-4049-aa81-46d3892bc4a9/volumes" Jan 21 11:05:49 crc kubenswrapper[4925]: I0121 11:05:49.941389 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:05:49 crc kubenswrapper[4925]: I0121 11:05:49.942458 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:06:19 crc kubenswrapper[4925]: I0121 11:06:19.941073 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 
21 11:06:19 crc kubenswrapper[4925]: I0121 11:06:19.942060 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:06:49 crc kubenswrapper[4925]: I0121 11:06:49.941504 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:06:49 crc kubenswrapper[4925]: I0121 11:06:49.942528 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:06:49 crc kubenswrapper[4925]: I0121 11:06:49.942608 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:06:49 crc kubenswrapper[4925]: I0121 11:06:49.943527 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8ecb481e8ef1d0b6466c51999dd109a4671270510611a3058cedaf5fd5398994"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:06:49 crc kubenswrapper[4925]: I0121 11:06:49.943617 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://8ecb481e8ef1d0b6466c51999dd109a4671270510611a3058cedaf5fd5398994" gracePeriod=600 Jan 21 11:06:50 crc kubenswrapper[4925]: I0121 11:06:50.477224 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="8ecb481e8ef1d0b6466c51999dd109a4671270510611a3058cedaf5fd5398994" exitCode=0 Jan 21 11:06:50 crc kubenswrapper[4925]: I0121 11:06:50.477329 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"8ecb481e8ef1d0b6466c51999dd109a4671270510611a3058cedaf5fd5398994"} Jan 21 11:06:50 crc kubenswrapper[4925]: I0121 11:06:50.477466 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"e3f868ed9651e50a998c56f421dcf313de9cb0d8cc843ecff23b89cfae066e06"} Jan 21 11:06:50 crc kubenswrapper[4925]: I0121 11:06:50.477500 4925 scope.go:117] "RemoveContainer" containerID="7d15779caa6e5b388f79a4466fbe1abe55140d18037403d8c0435912eed61b60" Jan 21 11:08:51 crc kubenswrapper[4925]: I0121 11:08:51.499612 4925 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 21 11:09:19 crc kubenswrapper[4925]: I0121 11:09:19.941947 4925 
patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:09:19 crc kubenswrapper[4925]: I0121 11:09:19.942947 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:09:49 crc kubenswrapper[4925]: I0121 11:09:49.941701 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:09:49 crc kubenswrapper[4925]: I0121 11:09:49.942656 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.175697 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh"] Jan 21 11:10:10 crc kubenswrapper[4925]: E0121 11:10:10.176981 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0770d392-cbe7-4049-aa81-46d3892bc4a9" containerName="registry" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.177001 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0770d392-cbe7-4049-aa81-46d3892bc4a9" containerName="registry" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.177155 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0770d392-cbe7-4049-aa81-46d3892bc4a9" containerName="registry" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.178492 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.181668 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.197151 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh"] Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.369162 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4qsc\" (UniqueName: \"kubernetes.io/projected/b8412bf3-79ed-4401-a927-e30a8a770afc-kube-api-access-r4qsc\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.369308 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.369483 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.470729 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4qsc\" (UniqueName: \"kubernetes.io/projected/b8412bf3-79ed-4401-a927-e30a8a770afc-kube-api-access-r4qsc\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.470859 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.470912 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.471701 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.471799 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.500546 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4qsc\" (UniqueName: \"kubernetes.io/projected/b8412bf3-79ed-4401-a927-e30a8a770afc-kube-api-access-r4qsc\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.669891 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:10 crc kubenswrapper[4925]: I0121 11:10:10.961108 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh"] Jan 21 11:10:11 crc kubenswrapper[4925]: I0121 11:10:11.388345 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" event={"ID":"b8412bf3-79ed-4401-a927-e30a8a770afc","Type":"ContainerStarted","Data":"4243e4ed6ea450ad8cd525f35815cd2368abfa3da644447209903f02fb8a7734"} Jan 21 11:10:11 crc kubenswrapper[4925]: I0121 11:10:11.388426 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" event={"ID":"b8412bf3-79ed-4401-a927-e30a8a770afc","Type":"ContainerStarted","Data":"ffad71ff2f05437162d95fd6acacb3d9b46a49dde988055fd4dda3a52054e932"} Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.398123 4925 generic.go:334] "Generic (PLEG): container finished" podID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerID="4243e4ed6ea450ad8cd525f35815cd2368abfa3da644447209903f02fb8a7734" exitCode=0 Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.398236 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" event={"ID":"b8412bf3-79ed-4401-a927-e30a8a770afc","Type":"ContainerDied","Data":"4243e4ed6ea450ad8cd525f35815cd2368abfa3da644447209903f02fb8a7734"} Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.400688 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.496802 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gwvr8"] Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.500240 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.513928 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gwvr8"] Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.603963 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-catalog-content\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.604039 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-utilities\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.604156 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjcl8\" (UniqueName: \"kubernetes.io/projected/6f744e39-b0b5-487d-80d8-7d8db370c838-kube-api-access-mjcl8\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.705377 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjcl8\" (UniqueName: \"kubernetes.io/projected/6f744e39-b0b5-487d-80d8-7d8db370c838-kube-api-access-mjcl8\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.705462 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-catalog-content\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.705499 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-utilities\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.706348 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-utilities\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.706353 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-catalog-content\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.736864 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mjcl8\" (UniqueName: \"kubernetes.io/projected/6f744e39-b0b5-487d-80d8-7d8db370c838-kube-api-access-mjcl8\") pod \"redhat-operators-gwvr8\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:12 crc kubenswrapper[4925]: I0121 11:10:12.865487 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:13 crc kubenswrapper[4925]: I0121 11:10:13.699894 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gwvr8"] Jan 21 11:10:13 crc kubenswrapper[4925]: W0121 11:10:13.933204 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f744e39_b0b5_487d_80d8_7d8db370c838.slice/crio-df42456ec1226ad8333294c684c6666ae292936a7c41d795f2db6c0f3ee3b57f WatchSource:0}: Error finding container df42456ec1226ad8333294c684c6666ae292936a7c41d795f2db6c0f3ee3b57f: Status 404 returned error can't find the container with id df42456ec1226ad8333294c684c6666ae292936a7c41d795f2db6c0f3ee3b57f Jan 21 11:10:14 crc kubenswrapper[4925]: I0121 11:10:14.589526 4925 generic.go:334] "Generic (PLEG): container finished" podID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerID="f803edbb9ad609a420e4bb0d0eefb5728e1c9fe1ab7cb959aaf86d3d482dcabb" exitCode=0 Jan 21 11:10:14 crc kubenswrapper[4925]: I0121 11:10:14.589622 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" event={"ID":"b8412bf3-79ed-4401-a927-e30a8a770afc","Type":"ContainerDied","Data":"f803edbb9ad609a420e4bb0d0eefb5728e1c9fe1ab7cb959aaf86d3d482dcabb"} Jan 21 11:10:14 crc kubenswrapper[4925]: I0121 11:10:14.592194 4925 generic.go:334] "Generic (PLEG): container finished" podID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerID="4c6e9c80cb7bf091d844892d0c649626fcf5a777331aad2b78639ddac1eab4ce" exitCode=0 Jan 21 11:10:14 crc kubenswrapper[4925]: I0121 11:10:14.592245 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvr8" event={"ID":"6f744e39-b0b5-487d-80d8-7d8db370c838","Type":"ContainerDied","Data":"4c6e9c80cb7bf091d844892d0c649626fcf5a777331aad2b78639ddac1eab4ce"} Jan 21 11:10:14 crc kubenswrapper[4925]: I0121 11:10:14.592301 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvr8" event={"ID":"6f744e39-b0b5-487d-80d8-7d8db370c838","Type":"ContainerStarted","Data":"df42456ec1226ad8333294c684c6666ae292936a7c41d795f2db6c0f3ee3b57f"} Jan 21 11:10:15 crc kubenswrapper[4925]: I0121 11:10:15.670837 4925 generic.go:334] "Generic (PLEG): container finished" podID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerID="07af2a089be2ea9068e12313d3e29835ce18e589e7cf551770a67492962277ea" exitCode=0 Jan 21 11:10:15 crc kubenswrapper[4925]: I0121 11:10:15.671031 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" event={"ID":"b8412bf3-79ed-4401-a927-e30a8a770afc","Type":"ContainerDied","Data":"07af2a089be2ea9068e12313d3e29835ce18e589e7cf551770a67492962277ea"} Jan 21 11:10:15 crc kubenswrapper[4925]: I0121 11:10:15.677662 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvr8" 
event={"ID":"6f744e39-b0b5-487d-80d8-7d8db370c838","Type":"ContainerStarted","Data":"2496b9b10667474e68123c9b3ae1dc109d082953103a876b7d765ccc79e0d4f8"} Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.394483 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.399901 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-bundle\") pod \"b8412bf3-79ed-4401-a927-e30a8a770afc\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.399988 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4qsc\" (UniqueName: \"kubernetes.io/projected/b8412bf3-79ed-4401-a927-e30a8a770afc-kube-api-access-r4qsc\") pod \"b8412bf3-79ed-4401-a927-e30a8a770afc\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.400032 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-util\") pod \"b8412bf3-79ed-4401-a927-e30a8a770afc\" (UID: \"b8412bf3-79ed-4401-a927-e30a8a770afc\") " Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.403983 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-bundle" (OuterVolumeSpecName: "bundle") pod "b8412bf3-79ed-4401-a927-e30a8a770afc" (UID: "b8412bf3-79ed-4401-a927-e30a8a770afc"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.409225 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8412bf3-79ed-4401-a927-e30a8a770afc-kube-api-access-r4qsc" (OuterVolumeSpecName: "kube-api-access-r4qsc") pod "b8412bf3-79ed-4401-a927-e30a8a770afc" (UID: "b8412bf3-79ed-4401-a927-e30a8a770afc"). InnerVolumeSpecName "kube-api-access-r4qsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.417088 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-util" (OuterVolumeSpecName: "util") pod "b8412bf3-79ed-4401-a927-e30a8a770afc" (UID: "b8412bf3-79ed-4401-a927-e30a8a770afc"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.501168 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4qsc\" (UniqueName: \"kubernetes.io/projected/b8412bf3-79ed-4401-a927-e30a8a770afc-kube-api-access-r4qsc\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.501242 4925 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-util\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.501252 4925 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8412bf3-79ed-4401-a927-e30a8a770afc-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.629068 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9hk9g"] Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.629694 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovn-controller" containerID="cri-o://5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c" gracePeriod=30 Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.629776 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191" gracePeriod=30 Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.630026 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="northd" containerID="cri-o://61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79" gracePeriod=30 Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.630087 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="nbdb" containerID="cri-o://ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" gracePeriod=30 Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.630173 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="sbdb" containerID="cri-o://eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" gracePeriod=30 Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.630231 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kube-rbac-proxy-node" containerID="cri-o://766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e" gracePeriod=30 Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.630276 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovn-acl-logging" containerID="cri-o://90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc" 
gracePeriod=30 Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.673700 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" containerID="cri-o://04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" gracePeriod=30 Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.724258 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" event={"ID":"b8412bf3-79ed-4401-a927-e30a8a770afc","Type":"ContainerDied","Data":"ffad71ff2f05437162d95fd6acacb3d9b46a49dde988055fd4dda3a52054e932"} Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.724314 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffad71ff2f05437162d95fd6acacb3d9b46a49dde988055fd4dda3a52054e932" Jan 21 11:10:18 crc kubenswrapper[4925]: I0121 11:10:18.724438 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh" Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.805652 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 is running failed: container process not found" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.806107 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 is running failed: container process not found" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.806630 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 is running failed: container process not found" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.806693 4925 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.810303 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.810540 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.811769 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.812491 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.813177 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.813223 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="nbdb" Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.819843 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Jan 21 11:10:18 crc kubenswrapper[4925]: E0121 11:10:18.819905 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="sbdb" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.320826 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/3.log" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.324833 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovn-acl-logging/0.log" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.325438 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovn-controller/0.log" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.326089 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392257 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-pfrnv"] Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392604 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kubecfg-setup" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392640 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kubecfg-setup" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392663 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392670 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392678 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392684 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392692 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="sbdb" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392699 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="sbdb" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392708 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392715 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392724 4925 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="northd" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392735 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="northd" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392751 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerName="pull" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392759 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerName="pull" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392772 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kube-rbac-proxy-node" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392778 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kube-rbac-proxy-node" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392786 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerName="util" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392792 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerName="util" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392800 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovn-acl-logging" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392806 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovn-acl-logging" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392818 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovn-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392824 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovn-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392834 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerName="extract" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392843 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerName="extract" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392852 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kube-rbac-proxy-ovn-metrics" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392859 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kube-rbac-proxy-ovn-metrics" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.392868 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="nbdb" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.392874 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="nbdb" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393018 4925 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kube-rbac-proxy-ovn-metrics" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393044 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8412bf3-79ed-4401-a927-e30a8a770afc" containerName="extract" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393053 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="sbdb" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393061 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393068 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="nbdb" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393075 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="kube-rbac-proxy-node" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393085 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovn-acl-logging" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393094 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393103 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovn-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393112 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393121 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393128 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="northd" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393134 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.393282 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393295 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: E0121 11:10:19.393303 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.393309 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerName="ovnkube-controller" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.408930 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.519210 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-bin\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.519313 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-systemd\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.519356 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520481 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-netns\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520532 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovn-node-metrics-cert\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520554 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-ovn-kubernetes\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520581 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520618 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-node-log\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520670 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-systemd-units\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520686 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-kubelet\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520708 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-config\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520722 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-netd\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520761 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-log-socket\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520791 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-openvswitch\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520815 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-ovn\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520839 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-slash\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520885 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-script-lib\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521015 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-env-overrides\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521047 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjd7c\" (UniqueName: \"kubernetes.io/projected/3a976857-73df-49d9-9b7e-b5cb3d250a5f-kube-api-access-cjd7c\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521070 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" 
(UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-var-lib-openvswitch\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521091 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-etc-openvswitch\") pod \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\" (UID: \"3a976857-73df-49d9-9b7e-b5cb3d250a5f\") " Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.520571 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521348 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-cni-netd\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521358 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521409 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-node-log" (OuterVolumeSpecName: "node-log") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521222 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521427 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-systemd\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521448 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-systemd-units\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521472 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521507 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521527 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovn-node-metrics-cert\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521544 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-run-ovn-kubernetes\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521584 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-etc-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521627 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-kubelet\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521643 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-ovn\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521673 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-env-overrides\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521707 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovnkube-config\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521728 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-cni-bin\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521743 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-var-lib-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521767 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-slash\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521782 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-run-netns\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521799 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-node-log\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521824 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovnkube-script-lib\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521290 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521312 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521303 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-slash" (OuterVolumeSpecName: "host-slash") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521339 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-log-socket" (OuterVolumeSpecName: "log-socket") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521444 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521458 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521437 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521787 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521877 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77jd7\" (UniqueName: \"kubernetes.io/projected/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-kube-api-access-77jd7\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.521930 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522040 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522202 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-log-socket\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522299 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522484 4925 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-node-log\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522508 4925 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522683 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522687 4925 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522765 4925 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522776 4925 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.522786 4925 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.524616 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.525616 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a976857-73df-49d9-9b7e-b5cb3d250a5f-kube-api-access-cjd7c" (OuterVolumeSpecName: "kube-api-access-cjd7c") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "kube-api-access-cjd7c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.535354 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "3a976857-73df-49d9-9b7e-b5cb3d250a5f" (UID: "3a976857-73df-49d9-9b7e-b5cb3d250a5f"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624317 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-ovn\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624481 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-ovn\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624547 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-env-overrides\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624631 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovnkube-config\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624691 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-cni-bin\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624748 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-var-lib-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624775 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-slash\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624816 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-cni-bin\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624824 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-run-netns\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624886 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-node-log\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624877 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-run-netns\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624915 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovnkube-script-lib\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624934 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77jd7\" (UniqueName: \"kubernetes.io/projected/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-kube-api-access-77jd7\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624945 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-var-lib-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625041 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-node-log\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625000 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-slash\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.624989 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-log-socket\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625124 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-cni-netd\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625201 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-systemd-units\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625239 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-systemd\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625286 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625345 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625377 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovn-node-metrics-cert\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625435 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-run-ovn-kubernetes\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625503 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-etc-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625572 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-kubelet\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625661 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjd7c\" (UniqueName: \"kubernetes.io/projected/3a976857-73df-49d9-9b7e-b5cb3d250a5f-kube-api-access-cjd7c\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625676 4925 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc 
kubenswrapper[4925]: I0121 11:10:19.625689 4925 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625701 4925 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625715 4925 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625729 4925 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625744 4925 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625761 4925 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625773 4925 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625785 4925 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-log-socket\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625798 4925 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625809 4925 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3a976857-73df-49d9-9b7e-b5cb3d250a5f-host-slash\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625822 4925 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625834 4925 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3a976857-73df-49d9-9b7e-b5cb3d250a5f-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625851 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovnkube-config\") pod \"ovnkube-node-pfrnv\" (UID: 
\"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625872 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-kubelet\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625855 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-env-overrides\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625128 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-log-socket\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625899 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625916 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625932 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-systemd-units\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.625965 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-cni-netd\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.626009 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-host-run-ovn-kubernetes\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.626011 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-run-systemd\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc 
kubenswrapper[4925]: I0121 11:10:19.626042 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-etc-openvswitch\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.626221 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovnkube-script-lib\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.629996 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-ovn-node-metrics-cert\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.928679 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77jd7\" (UniqueName: \"kubernetes.io/projected/0ca4638f-b097-4f42-9a44-c429d8c3e6b2-kube-api-access-77jd7\") pod \"ovnkube-node-pfrnv\" (UID: \"0ca4638f-b097-4f42-9a44-c429d8c3e6b2\") " pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.933893 4925 generic.go:334] "Generic (PLEG): container finished" podID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerID="2496b9b10667474e68123c9b3ae1dc109d082953103a876b7d765ccc79e0d4f8" exitCode=0 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.933977 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvr8" event={"ID":"6f744e39-b0b5-487d-80d8-7d8db370c838","Type":"ContainerDied","Data":"2496b9b10667474e68123c9b3ae1dc109d082953103a876b7d765ccc79e0d4f8"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.937415 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/2.log" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.938155 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/1.log" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.938480 4925 generic.go:334] "Generic (PLEG): container finished" podID="82b678c3-b1e1-4294-9f9f-02103a6823cc" containerID="429dfd6605e5b4b9683bdedbdf8361a34f0bc590cc74d63cb513a31aaca12791" exitCode=2 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.938691 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hwzqb" event={"ID":"82b678c3-b1e1-4294-9f9f-02103a6823cc","Type":"ContainerDied","Data":"429dfd6605e5b4b9683bdedbdf8361a34f0bc590cc74d63cb513a31aaca12791"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.938784 4925 scope.go:117] "RemoveContainer" containerID="61fdaae1dfc971ecf28e4f52444ec64f2ec0c9d7cdf79e6736ba9677c1bd7b22" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.939969 4925 scope.go:117] "RemoveContainer" containerID="429dfd6605e5b4b9683bdedbdf8361a34f0bc590cc74d63cb513a31aaca12791" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.940620 4925 patch_prober.go:28] interesting 
pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.940692 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.940751 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.941725 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e3f868ed9651e50a998c56f421dcf313de9cb0d8cc843ecff23b89cfae066e06"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.941793 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://e3f868ed9651e50a998c56f421dcf313de9cb0d8cc843ecff23b89cfae066e06" gracePeriod=600 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.943916 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovnkube-controller/3.log" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.949549 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovn-acl-logging/0.log" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957107 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9hk9g_3a976857-73df-49d9-9b7e-b5cb3d250a5f/ovn-controller/0.log" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957831 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" exitCode=0 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957870 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" exitCode=0 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957880 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" exitCode=0 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957887 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79" exitCode=0 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957896 4925 generic.go:334] "Generic (PLEG): container finished" 
podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191" exitCode=0 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957909 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e" exitCode=0 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957918 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc" exitCode=143 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957927 4925 generic.go:334] "Generic (PLEG): container finished" podID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" containerID="5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c" exitCode=143 Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957952 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.957995 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958013 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958028 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958042 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958057 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958073 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958088 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958096 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958164 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958173 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958205 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958212 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958219 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958227 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958233 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958244 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958263 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958291 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958299 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958324 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958332 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958348 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958361 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958375 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958387 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958414 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958423 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958440 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958449 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958463 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958468 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958474 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958480 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958496 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958503 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958509 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958515 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958523 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" event={"ID":"3a976857-73df-49d9-9b7e-b5cb3d250a5f","Type":"ContainerDied","Data":"1bcbdfb70b58bca9fbbc0f2d4d5705d025f9e4b48e99d9c0511d73ac2ead5ce3"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958531 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958538 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958543 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958549 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958555 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958561 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958568 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958572 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958577 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958582 4925 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"} Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.958720 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9hk9g" Jan 21 11:10:19 crc kubenswrapper[4925]: I0121 11:10:19.998083 4925 scope.go:117] "RemoveContainer" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.030579 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.037834 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9hk9g"] Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.038144 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.040852 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9hk9g"] Jan 21 11:10:20 crc kubenswrapper[4925]: W0121 11:10:20.071571 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ca4638f_b097_4f42_9a44_c429d8c3e6b2.slice/crio-0622d7884317977b74ab60ae6b43ce7ca6ef90e8a11e45b69f091b97b16ffb78 WatchSource:0}: Error finding container 0622d7884317977b74ab60ae6b43ce7ca6ef90e8a11e45b69f091b97b16ffb78: Status 404 returned error can't find the container with id 0622d7884317977b74ab60ae6b43ce7ca6ef90e8a11e45b69f091b97b16ffb78 Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.081026 4925 scope.go:117] "RemoveContainer" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.108771 4925 scope.go:117] "RemoveContainer" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.132797 4925 scope.go:117] "RemoveContainer" containerID="61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.149624 4925 scope.go:117] "RemoveContainer" containerID="74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.173692 4925 scope.go:117] "RemoveContainer" containerID="766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.206644 4925 scope.go:117] "RemoveContainer" containerID="90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.223873 4925 scope.go:117] "RemoveContainer" containerID="5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.239752 4925 scope.go:117] "RemoveContainer" containerID="103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.259503 4925 scope.go:117] "RemoveContainer" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.260171 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": container with ID starting with 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 not found: ID does not exist" 
containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.260236 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} err="failed to get container status \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": rpc error: code = NotFound desc = could not find container \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": container with ID starting with 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.260273 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.260718 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": container with ID starting with c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288 not found: ID does not exist" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.260788 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} err="failed to get container status \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": rpc error: code = NotFound desc = could not find container \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": container with ID starting with c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.260834 4925 scope.go:117] "RemoveContainer" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.261146 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": container with ID starting with eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1 not found: ID does not exist" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.261181 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} err="failed to get container status \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": rpc error: code = NotFound desc = could not find container \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": container with ID starting with eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.261198 4925 scope.go:117] "RemoveContainer" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.261505 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": container with ID starting with ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83 not found: ID does not exist" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.261534 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} err="failed to get container status \"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": rpc error: code = NotFound desc = could not find container \"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": container with ID starting with ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.261550 4925 scope.go:117] "RemoveContainer" containerID="61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.261745 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": container with ID starting with 61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79 not found: ID does not exist" containerID="61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.261898 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} err="failed to get container status \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": rpc error: code = NotFound desc = could not find container \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": container with ID starting with 61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.261927 4925 scope.go:117] "RemoveContainer" containerID="74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.262247 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": container with ID starting with 74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191 not found: ID does not exist" containerID="74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.262342 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} err="failed to get container status \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": rpc error: code = NotFound desc = could not find container \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": container with ID starting with 74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.262376 4925 scope.go:117] "RemoveContainer" containerID="766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e" Jan 21 11:10:20 crc 
kubenswrapper[4925]: E0121 11:10:20.262630 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": container with ID starting with 766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e not found: ID does not exist" containerID="766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.262667 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} err="failed to get container status \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": rpc error: code = NotFound desc = could not find container \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": container with ID starting with 766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.262683 4925 scope.go:117] "RemoveContainer" containerID="90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.262959 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": container with ID starting with 90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc not found: ID does not exist" containerID="90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.262979 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} err="failed to get container status \"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": rpc error: code = NotFound desc = could not find container \"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": container with ID starting with 90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.262995 4925 scope.go:117] "RemoveContainer" containerID="5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.263415 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": container with ID starting with 5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c not found: ID does not exist" containerID="5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.263479 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} err="failed to get container status \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": rpc error: code = NotFound desc = could not find container \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": container with ID starting with 5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: 
I0121 11:10:20.263510 4925 scope.go:117] "RemoveContainer" containerID="103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3" Jan 21 11:10:20 crc kubenswrapper[4925]: E0121 11:10:20.263803 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": container with ID starting with 103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3 not found: ID does not exist" containerID="103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.263831 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"} err="failed to get container status \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": rpc error: code = NotFound desc = could not find container \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": container with ID starting with 103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.263970 4925 scope.go:117] "RemoveContainer" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.264465 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} err="failed to get container status \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": rpc error: code = NotFound desc = could not find container \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": container with ID starting with 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.264506 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.264910 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} err="failed to get container status \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": rpc error: code = NotFound desc = could not find container \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": container with ID starting with c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.264936 4925 scope.go:117] "RemoveContainer" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.265529 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} err="failed to get container status \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": rpc error: code = NotFound desc = could not find container \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": container with ID starting with eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: 
I0121 11:10:20.265553 4925 scope.go:117] "RemoveContainer" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.267409 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} err="failed to get container status \"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": rpc error: code = NotFound desc = could not find container \"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": container with ID starting with ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.267443 4925 scope.go:117] "RemoveContainer" containerID="61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.267828 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} err="failed to get container status \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": rpc error: code = NotFound desc = could not find container \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": container with ID starting with 61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.267865 4925 scope.go:117] "RemoveContainer" containerID="74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.268441 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} err="failed to get container status \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": rpc error: code = NotFound desc = could not find container \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": container with ID starting with 74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.268520 4925 scope.go:117] "RemoveContainer" containerID="766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.268969 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} err="failed to get container status \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": rpc error: code = NotFound desc = could not find container \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": container with ID starting with 766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.269006 4925 scope.go:117] "RemoveContainer" containerID="90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.269358 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} err="failed to get container status 
\"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": rpc error: code = NotFound desc = could not find container \"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": container with ID starting with 90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.269388 4925 scope.go:117] "RemoveContainer" containerID="5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.269732 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} err="failed to get container status \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": rpc error: code = NotFound desc = could not find container \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": container with ID starting with 5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.269779 4925 scope.go:117] "RemoveContainer" containerID="103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.270058 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"} err="failed to get container status \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": rpc error: code = NotFound desc = could not find container \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": container with ID starting with 103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.270080 4925 scope.go:117] "RemoveContainer" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.271591 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} err="failed to get container status \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": rpc error: code = NotFound desc = could not find container \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": container with ID starting with 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.271666 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.272021 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} err="failed to get container status \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": rpc error: code = NotFound desc = could not find container \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": container with ID starting with c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.272058 4925 scope.go:117] "RemoveContainer" 
containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.272433 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} err="failed to get container status \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": rpc error: code = NotFound desc = could not find container \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": container with ID starting with eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.272467 4925 scope.go:117] "RemoveContainer" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.272905 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} err="failed to get container status \"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": rpc error: code = NotFound desc = could not find container \"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": container with ID starting with ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.272968 4925 scope.go:117] "RemoveContainer" containerID="61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.273222 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} err="failed to get container status \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": rpc error: code = NotFound desc = could not find container \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": container with ID starting with 61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.273246 4925 scope.go:117] "RemoveContainer" containerID="74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.273554 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} err="failed to get container status \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": rpc error: code = NotFound desc = could not find container \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": container with ID starting with 74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.273622 4925 scope.go:117] "RemoveContainer" containerID="766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.274160 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} err="failed to get container status \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": rpc error: code = NotFound desc = could not find 
container \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": container with ID starting with 766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.274215 4925 scope.go:117] "RemoveContainer" containerID="90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.274590 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} err="failed to get container status \"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": rpc error: code = NotFound desc = could not find container \"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": container with ID starting with 90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.274627 4925 scope.go:117] "RemoveContainer" containerID="5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.274949 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} err="failed to get container status \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": rpc error: code = NotFound desc = could not find container \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": container with ID starting with 5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.274996 4925 scope.go:117] "RemoveContainer" containerID="103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.275442 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"} err="failed to get container status \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": rpc error: code = NotFound desc = could not find container \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": container with ID starting with 103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.275515 4925 scope.go:117] "RemoveContainer" containerID="04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.276042 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5"} err="failed to get container status \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": rpc error: code = NotFound desc = could not find container \"04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5\": container with ID starting with 04fa22f500c335e2004527a5250a63a7df81a2c89d8f015ed90ce336d671b9a5 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.276108 4925 scope.go:117] "RemoveContainer" containerID="c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.276646 4925 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288"} err="failed to get container status \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": rpc error: code = NotFound desc = could not find container \"c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288\": container with ID starting with c33e8c8c3d8639b03202811df9084b7992b6b1e729892d434906ac42f249f288 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.276732 4925 scope.go:117] "RemoveContainer" containerID="eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.277489 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1"} err="failed to get container status \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": rpc error: code = NotFound desc = could not find container \"eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1\": container with ID starting with eb7cb2674e97f0cd5707e628c7a68455505b8d7a4f2b437b0d32aefcac78b2d1 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.277536 4925 scope.go:117] "RemoveContainer" containerID="ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.277886 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83"} err="failed to get container status \"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": rpc error: code = NotFound desc = could not find container \"ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83\": container with ID starting with ceeaf177d2be43c531752d5da21cdfabc7a16a9beadc87405281d370690abe83 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.277934 4925 scope.go:117] "RemoveContainer" containerID="61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.278301 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79"} err="failed to get container status \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": rpc error: code = NotFound desc = could not find container \"61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79\": container with ID starting with 61bb215979ec50499aadef4dbb7c1ce957c380fa294a3fa786ce5ccfa6335d79 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.278339 4925 scope.go:117] "RemoveContainer" containerID="74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.278848 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191"} err="failed to get container status \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": rpc error: code = NotFound desc = could not find container \"74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191\": container with ID starting with 
74dcef2f6818479f4393d4199865090c0ce8b7788477f486542206ccc2a33191 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.278933 4925 scope.go:117] "RemoveContainer" containerID="766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.279363 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e"} err="failed to get container status \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": rpc error: code = NotFound desc = could not find container \"766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e\": container with ID starting with 766526209e0085b6b7322769f94bb89a415e2c57928f47fb3b319d1d647f274e not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.279453 4925 scope.go:117] "RemoveContainer" containerID="90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.279844 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc"} err="failed to get container status \"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": rpc error: code = NotFound desc = could not find container \"90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc\": container with ID starting with 90dafbd14665ed02aef3420fb7a17a91ae5788f00e49cb6012f7299d3e1901bc not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.279876 4925 scope.go:117] "RemoveContainer" containerID="5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.280256 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c"} err="failed to get container status \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": rpc error: code = NotFound desc = could not find container \"5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c\": container with ID starting with 5f55123895bc56345839cbbc5195284b563a0ca092ffb6c5cdbd16d71b5a079c not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.280314 4925 scope.go:117] "RemoveContainer" containerID="103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.280914 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3"} err="failed to get container status \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": rpc error: code = NotFound desc = could not find container \"103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3\": container with ID starting with 103966983ffee677652269e67fdee60574d924368099f9606028b19ec8d85fc3 not found: ID does not exist" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.969979 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="e3f868ed9651e50a998c56f421dcf313de9cb0d8cc843ecff23b89cfae066e06" exitCode=0 Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.970047 4925 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"e3f868ed9651e50a998c56f421dcf313de9cb0d8cc843ecff23b89cfae066e06"} Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.970149 4925 scope.go:117] "RemoveContainer" containerID="8ecb481e8ef1d0b6466c51999dd109a4671270510611a3058cedaf5fd5398994" Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.973509 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"0622d7884317977b74ab60ae6b43ce7ca6ef90e8a11e45b69f091b97b16ffb78"} Jan 21 11:10:20 crc kubenswrapper[4925]: I0121 11:10:20.975815 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/2.log" Jan 21 11:10:21 crc kubenswrapper[4925]: I0121 11:10:21.519633 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a976857-73df-49d9-9b7e-b5cb3d250a5f" path="/var/lib/kubelet/pods/3a976857-73df-49d9-9b7e-b5cb3d250a5f/volumes" Jan 21 11:10:21 crc kubenswrapper[4925]: I0121 11:10:21.988435 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/2.log" Jan 21 11:10:21 crc kubenswrapper[4925]: I0121 11:10:21.988545 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hwzqb" event={"ID":"82b678c3-b1e1-4294-9f9f-02103a6823cc","Type":"ContainerStarted","Data":"c8cf2c422f39e0465092de5100b728050e88c8e77f8ca4761f8806fbf160d986"} Jan 21 11:10:21 crc kubenswrapper[4925]: I0121 11:10:21.998991 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"e772253e4c2e0ac8edf4468d742ee24fdcac170b16df83d5dd4bb209eb0b7a25"} Jan 21 11:10:22 crc kubenswrapper[4925]: I0121 11:10:22.003782 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"185ef89f6cdc5b034fd99c9fbdd47766a2ab54f8342e9415a0dc047394aec73a"} Jan 21 11:10:24 crc kubenswrapper[4925]: I0121 11:10:24.187424 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvr8" event={"ID":"6f744e39-b0b5-487d-80d8-7d8db370c838","Type":"ContainerStarted","Data":"b819e953e39eb3cd05b0fb93a5bfd61ffd987610715a209467c50d20f5659b64"} Jan 21 11:10:24 crc kubenswrapper[4925]: I0121 11:10:24.194517 4925 generic.go:334] "Generic (PLEG): container finished" podID="0ca4638f-b097-4f42-9a44-c429d8c3e6b2" containerID="185ef89f6cdc5b034fd99c9fbdd47766a2ab54f8342e9415a0dc047394aec73a" exitCode=0 Jan 21 11:10:24 crc kubenswrapper[4925]: I0121 11:10:24.194576 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerDied","Data":"185ef89f6cdc5b034fd99c9fbdd47766a2ab54f8342e9415a0dc047394aec73a"} Jan 21 11:10:24 crc kubenswrapper[4925]: I0121 11:10:24.235204 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gwvr8" podStartSLOduration=4.912858666 podStartE2EDuration="12.235144231s" podCreationTimestamp="2026-01-21 11:10:12 
+0000 UTC" firstStartedPulling="2026-01-21 11:10:14.593732624 +0000 UTC m=+906.197624558" lastFinishedPulling="2026-01-21 11:10:21.916018189 +0000 UTC m=+913.519910123" observedRunningTime="2026-01-21 11:10:24.225085514 +0000 UTC m=+915.828977458" watchObservedRunningTime="2026-01-21 11:10:24.235144231 +0000 UTC m=+915.839036165" Jan 21 11:10:25 crc kubenswrapper[4925]: I0121 11:10:25.221306 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"370642eda8bd055f086dc06bf944d1b0efd731a3cbd1b79614d093221e756de1"} Jan 21 11:10:26 crc kubenswrapper[4925]: I0121 11:10:26.242592 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"f49d74be4d5a79eaef27e28325ece2f8811b0dc1313beeb000d84eabef8ab27a"} Jan 21 11:10:26 crc kubenswrapper[4925]: I0121 11:10:26.243210 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"a9799d0e2a4c863b4a390cbbe430fa2c49e133f281e6724c3e40b5b4d62b3e93"} Jan 21 11:10:27 crc kubenswrapper[4925]: I0121 11:10:27.542378 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"a5d8607cede36b40ed41ee647d701b00b3b2dc44384aa3c4f2e9c15f9c2da89e"} Jan 21 11:10:27 crc kubenswrapper[4925]: I0121 11:10:27.542853 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"bc510ba4a4192a3174744e69f74c3f68068bef59afa423a4c5961990bc614339"} Jan 21 11:10:28 crc kubenswrapper[4925]: I0121 11:10:28.754222 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"e2ad83e689496c5ec59aa94e0ab4738b2eb7d02d05d18509e7608647a05a7f90"} Jan 21 11:10:31 crc kubenswrapper[4925]: I0121 11:10:31.392894 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"820fd8a3395c877db0ff32a9cad3e6e39982ddce010f80cefbe5b9cca808779a"} Jan 21 11:10:32 crc kubenswrapper[4925]: I0121 11:10:32.910885 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:32 crc kubenswrapper[4925]: I0121 11:10:32.912086 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:34 crc kubenswrapper[4925]: I0121 11:10:34.558092 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvr8" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="registry-server" probeResult="failure" output=< Jan 21 11:10:34 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:10:34 crc kubenswrapper[4925]: > Jan 21 11:10:34 crc kubenswrapper[4925]: I0121 11:10:34.611465 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" 
event={"ID":"0ca4638f-b097-4f42-9a44-c429d8c3e6b2","Type":"ContainerStarted","Data":"6d3f53c40d2b1d6da1ffdffc064d207f11f1d667524d3154396a7ebdf40cc3df"} Jan 21 11:10:34 crc kubenswrapper[4925]: I0121 11:10:34.612632 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:34 crc kubenswrapper[4925]: I0121 11:10:34.612924 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:34 crc kubenswrapper[4925]: I0121 11:10:34.613012 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:34 crc kubenswrapper[4925]: I0121 11:10:34.687660 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:34 crc kubenswrapper[4925]: I0121 11:10:34.692834 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:34 crc kubenswrapper[4925]: I0121 11:10:34.864911 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" podStartSLOduration=15.864871158 podStartE2EDuration="15.864871158s" podCreationTimestamp="2026-01-21 11:10:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:10:34.862317318 +0000 UTC m=+926.466209262" watchObservedRunningTime="2026-01-21 11:10:34.864871158 +0000 UTC m=+926.468763112" Jan 21 11:10:38 crc kubenswrapper[4925]: I0121 11:10:38.358166 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml"] Jan 21 11:10:38 crc kubenswrapper[4925]: I0121 11:10:38.360241 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:38 crc kubenswrapper[4925]: I0121 11:10:38.363256 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-fzbvx" Jan 21 11:10:38 crc kubenswrapper[4925]: I0121 11:10:38.363699 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 21 11:10:38 crc kubenswrapper[4925]: I0121 11:10:38.368169 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 21 11:10:38 crc kubenswrapper[4925]: I0121 11:10:38.435072 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8qh8\" (UniqueName: \"kubernetes.io/projected/5331ad9e-1914-414a-a7b2-b52eb191ba2f-kube-api-access-b8qh8\") pod \"obo-prometheus-operator-68bc856cb9-x48ml\" (UID: \"5331ad9e-1914-414a-a7b2-b52eb191ba2f\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:38 crc kubenswrapper[4925]: I0121 11:10:38.512654 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf"] Jan 21 11:10:38 crc kubenswrapper[4925]: I0121 11:10:38.513925 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:38.539115 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8qh8\" (UniqueName: \"kubernetes.io/projected/5331ad9e-1914-414a-a7b2-b52eb191ba2f-kube-api-access-b8qh8\") pod \"obo-prometheus-operator-68bc856cb9-x48ml\" (UID: \"5331ad9e-1914-414a-a7b2-b52eb191ba2f\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:38.641663 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/28aa0136-6b61-4a88-907d-265c48e36f08-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf\" (UID: \"28aa0136-6b61-4a88-907d-265c48e36f08\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:38.641970 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/28aa0136-6b61-4a88-907d-265c48e36f08-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf\" (UID: \"28aa0136-6b61-4a88-907d-265c48e36f08\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.097384 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/28aa0136-6b61-4a88-907d-265c48e36f08-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf\" (UID: \"28aa0136-6b61-4a88-907d-265c48e36f08\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.097482 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/28aa0136-6b61-4a88-907d-265c48e36f08-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf\" (UID: \"28aa0136-6b61-4a88-907d-265c48e36f08\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.118274 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.134024 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-pgp59" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.159259 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw"] Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.166972 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/28aa0136-6b61-4a88-907d-265c48e36f08-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf\" (UID: \"28aa0136-6b61-4a88-907d-265c48e36f08\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.178086 4925 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.178122 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/28aa0136-6b61-4a88-907d-265c48e36f08-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf\" (UID: \"28aa0136-6b61-4a88-907d-265c48e36f08\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.198505 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea8b2f0b-f77a-4737-be37-3268437871d9-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw\" (UID: \"ea8b2f0b-f77a-4737-be37-3268437871d9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.198961 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea8b2f0b-f77a-4737-be37-3268437871d9-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw\" (UID: \"ea8b2f0b-f77a-4737-be37-3268437871d9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.200617 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8qh8\" (UniqueName: \"kubernetes.io/projected/5331ad9e-1914-414a-a7b2-b52eb191ba2f-kube-api-access-b8qh8\") pod \"obo-prometheus-operator-68bc856cb9-x48ml\" (UID: \"5331ad9e-1914-414a-a7b2-b52eb191ba2f\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.285891 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.300615 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea8b2f0b-f77a-4737-be37-3268437871d9-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw\" (UID: \"ea8b2f0b-f77a-4737-be37-3268437871d9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.301167 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea8b2f0b-f77a-4737-be37-3268437871d9-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw\" (UID: \"ea8b2f0b-f77a-4737-be37-3268437871d9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.349054 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea8b2f0b-f77a-4737-be37-3268437871d9-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw\" (UID: \"ea8b2f0b-f77a-4737-be37-3268437871d9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.349270 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea8b2f0b-f77a-4737-be37-3268437871d9-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw\" (UID: \"ea8b2f0b-f77a-4737-be37-3268437871d9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.382876 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.427708 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators_5331ad9e-1914-414a-a7b2-b52eb191ba2f_0(93e703a9fa9bc6780684e20fe51e86ef6f3fdfd6920315ce6f0510c1435f3dce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.427848 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators_5331ad9e-1914-414a-a7b2-b52eb191ba2f_0(93e703a9fa9bc6780684e20fe51e86ef6f3fdfd6920315ce6f0510c1435f3dce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.427904 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators_5331ad9e-1914-414a-a7b2-b52eb191ba2f_0(93e703a9fa9bc6780684e20fe51e86ef6f3fdfd6920315ce6f0510c1435f3dce): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.428006 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators(5331ad9e-1914-414a-a7b2-b52eb191ba2f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators(5331ad9e-1914-414a-a7b2-b52eb191ba2f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators_5331ad9e-1914-414a-a7b2-b52eb191ba2f_0(93e703a9fa9bc6780684e20fe51e86ef6f3fdfd6920315ce6f0510c1435f3dce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" podUID="5331ad9e-1914-414a-a7b2-b52eb191ba2f" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.465915 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.482729 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators_ea8b2f0b-f77a-4737-be37-3268437871d9_0(3dee8ceba349efd5e22d137600a8afb18bf70a6c65fa281529b4f777c5d0e018): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.482858 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators_ea8b2f0b-f77a-4737-be37-3268437871d9_0(3dee8ceba349efd5e22d137600a8afb18bf70a6c65fa281529b4f777c5d0e018): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.482896 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators_ea8b2f0b-f77a-4737-be37-3268437871d9_0(3dee8ceba349efd5e22d137600a8afb18bf70a6c65fa281529b4f777c5d0e018): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.482970 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators(ea8b2f0b-f77a-4737-be37-3268437871d9)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators(ea8b2f0b-f77a-4737-be37-3268437871d9)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators_ea8b2f0b-f77a-4737-be37-3268437871d9_0(3dee8ceba349efd5e22d137600a8afb18bf70a6c65fa281529b4f777c5d0e018): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" podUID="ea8b2f0b-f77a-4737-be37-3268437871d9" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.552806 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators_28aa0136-6b61-4a88-907d-265c48e36f08_0(be69679ce72531a72ee231e1c4e19ab577ec2c174ef2097d2eb88ad4a9af2a3f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.552944 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators_28aa0136-6b61-4a88-907d-265c48e36f08_0(be69679ce72531a72ee231e1c4e19ab577ec2c174ef2097d2eb88ad4a9af2a3f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.552982 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators_28aa0136-6b61-4a88-907d-265c48e36f08_0(be69679ce72531a72ee231e1c4e19ab577ec2c174ef2097d2eb88ad4a9af2a3f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:39 crc kubenswrapper[4925]: E0121 11:10:39.553052 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators(28aa0136-6b61-4a88-907d-265c48e36f08)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators(28aa0136-6b61-4a88-907d-265c48e36f08)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators_28aa0136-6b61-4a88-907d-265c48e36f08_0(be69679ce72531a72ee231e1c4e19ab577ec2c174ef2097d2eb88ad4a9af2a3f): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" podUID="28aa0136-6b61-4a88-907d-265c48e36f08" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.638624 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-jvt9p"] Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.639722 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.646387 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-526dd" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.646988 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.712333 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e052dc8b-2520-4757-bb0a-d1350ad44b08-observability-operator-tls\") pod \"observability-operator-59bdc8b94-jvt9p\" (UID: \"e052dc8b-2520-4757-bb0a-d1350ad44b08\") " pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:39 crc kubenswrapper[4925]: I0121 11:10:39.712895 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8chrw\" (UniqueName: \"kubernetes.io/projected/e052dc8b-2520-4757-bb0a-d1350ad44b08-kube-api-access-8chrw\") pod \"observability-operator-59bdc8b94-jvt9p\" (UID: \"e052dc8b-2520-4757-bb0a-d1350ad44b08\") " pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.341166 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e052dc8b-2520-4757-bb0a-d1350ad44b08-observability-operator-tls\") pod \"observability-operator-59bdc8b94-jvt9p\" (UID: \"e052dc8b-2520-4757-bb0a-d1350ad44b08\") " pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.341265 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8chrw\" (UniqueName: \"kubernetes.io/projected/e052dc8b-2520-4757-bb0a-d1350ad44b08-kube-api-access-8chrw\") pod \"observability-operator-59bdc8b94-jvt9p\" (UID: \"e052dc8b-2520-4757-bb0a-d1350ad44b08\") " pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.347636 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e052dc8b-2520-4757-bb0a-d1350ad44b08-observability-operator-tls\") pod \"observability-operator-59bdc8b94-jvt9p\" (UID: \"e052dc8b-2520-4757-bb0a-d1350ad44b08\") " pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.440895 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8chrw\" (UniqueName: \"kubernetes.io/projected/e052dc8b-2520-4757-bb0a-d1350ad44b08-kube-api-access-8chrw\") pod \"observability-operator-59bdc8b94-jvt9p\" (UID: \"e052dc8b-2520-4757-bb0a-d1350ad44b08\") " 
pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.451708 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-655nk"] Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.452963 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.465663 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-rk8qn" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.573046 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.606383 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-jvt9p_openshift-operators_e052dc8b-2520-4757-bb0a-d1350ad44b08_0(a5e93137cbfc8132da09df046def25d033d32643f939dc0144d363c356d52287): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.606528 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-jvt9p_openshift-operators_e052dc8b-2520-4757-bb0a-d1350ad44b08_0(a5e93137cbfc8132da09df046def25d033d32643f939dc0144d363c356d52287): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.606572 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-jvt9p_openshift-operators_e052dc8b-2520-4757-bb0a-d1350ad44b08_0(a5e93137cbfc8132da09df046def25d033d32643f939dc0144d363c356d52287): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.606650 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-jvt9p_openshift-operators(e052dc8b-2520-4757-bb0a-d1350ad44b08)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-jvt9p_openshift-operators(e052dc8b-2520-4757-bb0a-d1350ad44b08)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-jvt9p_openshift-operators_e052dc8b-2520-4757-bb0a-d1350ad44b08_0(a5e93137cbfc8132da09df046def25d033d32643f939dc0144d363c356d52287): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" podUID="e052dc8b-2520-4757-bb0a-d1350ad44b08" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.646693 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/0d031a33-73a8-45d7-9979-e1266d9e7be7-openshift-service-ca\") pod \"perses-operator-5bf474d74f-655nk\" (UID: \"0d031a33-73a8-45d7-9979-e1266d9e7be7\") " pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.646771 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc62z\" (UniqueName: \"kubernetes.io/projected/0d031a33-73a8-45d7-9979-e1266d9e7be7-kube-api-access-sc62z\") pod \"perses-operator-5bf474d74f-655nk\" (UID: \"0d031a33-73a8-45d7-9979-e1266d9e7be7\") " pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.690967 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml"] Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.691240 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.692148 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.695642 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf"] Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.695965 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.698024 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.731432 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-655nk"] Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.748335 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/0d031a33-73a8-45d7-9979-e1266d9e7be7-openshift-service-ca\") pod \"perses-operator-5bf474d74f-655nk\" (UID: \"0d031a33-73a8-45d7-9979-e1266d9e7be7\") " pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.748432 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc62z\" (UniqueName: \"kubernetes.io/projected/0d031a33-73a8-45d7-9979-e1266d9e7be7-kube-api-access-sc62z\") pod \"perses-operator-5bf474d74f-655nk\" (UID: \"0d031a33-73a8-45d7-9979-e1266d9e7be7\") " pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.750200 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/0d031a33-73a8-45d7-9979-e1266d9e7be7-openshift-service-ca\") pod \"perses-operator-5bf474d74f-655nk\" (UID: \"0d031a33-73a8-45d7-9979-e1266d9e7be7\") " pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.771509 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-jvt9p"] Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.771934 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw"] Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.772213 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.773223 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.777985 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators_5331ad9e-1914-414a-a7b2-b52eb191ba2f_0(e77a3055883074e1aff5965374e32898258557ee12ec7aaaed2cf03894c86c31): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.778128 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators_5331ad9e-1914-414a-a7b2-b52eb191ba2f_0(e77a3055883074e1aff5965374e32898258557ee12ec7aaaed2cf03894c86c31): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.778165 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators_5331ad9e-1914-414a-a7b2-b52eb191ba2f_0(e77a3055883074e1aff5965374e32898258557ee12ec7aaaed2cf03894c86c31): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.778241 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators(5331ad9e-1914-414a-a7b2-b52eb191ba2f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators(5331ad9e-1914-414a-a7b2-b52eb191ba2f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-x48ml_openshift-operators_5331ad9e-1914-414a-a7b2-b52eb191ba2f_0(e77a3055883074e1aff5965374e32898258557ee12ec7aaaed2cf03894c86c31): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" podUID="5331ad9e-1914-414a-a7b2-b52eb191ba2f" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.782035 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators_28aa0136-6b61-4a88-907d-265c48e36f08_0(cdf060adaf3016df03cebdee0aa2364c28aad85d2ac17d8bec4526aad44704de): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.782106 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators_28aa0136-6b61-4a88-907d-265c48e36f08_0(cdf060adaf3016df03cebdee0aa2364c28aad85d2ac17d8bec4526aad44704de): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.782128 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators_28aa0136-6b61-4a88-907d-265c48e36f08_0(cdf060adaf3016df03cebdee0aa2364c28aad85d2ac17d8bec4526aad44704de): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.782180 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators(28aa0136-6b61-4a88-907d-265c48e36f08)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators(28aa0136-6b61-4a88-907d-265c48e36f08)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_openshift-operators_28aa0136-6b61-4a88-907d-265c48e36f08_0(cdf060adaf3016df03cebdee0aa2364c28aad85d2ac17d8bec4526aad44704de): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" podUID="28aa0136-6b61-4a88-907d-265c48e36f08" Jan 21 11:10:40 crc kubenswrapper[4925]: I0121 11:10:40.791135 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc62z\" (UniqueName: \"kubernetes.io/projected/0d031a33-73a8-45d7-9979-e1266d9e7be7-kube-api-access-sc62z\") pod \"perses-operator-5bf474d74f-655nk\" (UID: \"0d031a33-73a8-45d7-9979-e1266d9e7be7\") " pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.815582 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators_ea8b2f0b-f77a-4737-be37-3268437871d9_0(984f583017faa2903e776cedb6791855a2c5a72b462e0dd938e6102341be163f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.815830 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators_ea8b2f0b-f77a-4737-be37-3268437871d9_0(984f583017faa2903e776cedb6791855a2c5a72b462e0dd938e6102341be163f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.815941 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators_ea8b2f0b-f77a-4737-be37-3268437871d9_0(984f583017faa2903e776cedb6791855a2c5a72b462e0dd938e6102341be163f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:40 crc kubenswrapper[4925]: E0121 11:10:40.816080 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators(ea8b2f0b-f77a-4737-be37-3268437871d9)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators(ea8b2f0b-f77a-4737-be37-3268437871d9)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_openshift-operators_ea8b2f0b-f77a-4737-be37-3268437871d9_0(984f583017faa2903e776cedb6791855a2c5a72b462e0dd938e6102341be163f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" podUID="ea8b2f0b-f77a-4737-be37-3268437871d9" Jan 21 11:10:41 crc kubenswrapper[4925]: I0121 11:10:41.082377 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.120099 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-655nk_openshift-operators_0d031a33-73a8-45d7-9979-e1266d9e7be7_0(171463c5f5fc2992be654fc61b46c9eb6fb13c6f43169712442585358a70ed54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.120358 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-655nk_openshift-operators_0d031a33-73a8-45d7-9979-e1266d9e7be7_0(171463c5f5fc2992be654fc61b46c9eb6fb13c6f43169712442585358a70ed54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.120425 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-655nk_openshift-operators_0d031a33-73a8-45d7-9979-e1266d9e7be7_0(171463c5f5fc2992be654fc61b46c9eb6fb13c6f43169712442585358a70ed54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.120520 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-655nk_openshift-operators(0d031a33-73a8-45d7-9979-e1266d9e7be7)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-655nk_openshift-operators(0d031a33-73a8-45d7-9979-e1266d9e7be7)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-655nk_openshift-operators_0d031a33-73a8-45d7-9979-e1266d9e7be7_0(171463c5f5fc2992be654fc61b46c9eb6fb13c6f43169712442585358a70ed54): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-655nk" podUID="0d031a33-73a8-45d7-9979-e1266d9e7be7" Jan 21 11:10:41 crc kubenswrapper[4925]: I0121 11:10:41.678818 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:41 crc kubenswrapper[4925]: I0121 11:10:41.679353 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:41 crc kubenswrapper[4925]: I0121 11:10:41.679993 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:41 crc kubenswrapper[4925]: I0121 11:10:41.680207 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.942283 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-655nk_openshift-operators_0d031a33-73a8-45d7-9979-e1266d9e7be7_0(1efcf8fb564dc1a1d88ee4c83cceffd248c379446851e345406528c55503c250): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.942367 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-655nk_openshift-operators_0d031a33-73a8-45d7-9979-e1266d9e7be7_0(1efcf8fb564dc1a1d88ee4c83cceffd248c379446851e345406528c55503c250): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.942537 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-655nk_openshift-operators_0d031a33-73a8-45d7-9979-e1266d9e7be7_0(1efcf8fb564dc1a1d88ee4c83cceffd248c379446851e345406528c55503c250): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.942605 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-655nk_openshift-operators(0d031a33-73a8-45d7-9979-e1266d9e7be7)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-655nk_openshift-operators(0d031a33-73a8-45d7-9979-e1266d9e7be7)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-655nk_openshift-operators_0d031a33-73a8-45d7-9979-e1266d9e7be7_0(1efcf8fb564dc1a1d88ee4c83cceffd248c379446851e345406528c55503c250): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-655nk" podUID="0d031a33-73a8-45d7-9979-e1266d9e7be7" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.953583 4925 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-jvt9p_openshift-operators_e052dc8b-2520-4757-bb0a-d1350ad44b08_0(4c2a34fb699583664b3b7ad2162c9dcb28c8dd0340fcf011cdcd39afd1ab1bea): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.953672 4925 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-jvt9p_openshift-operators_e052dc8b-2520-4757-bb0a-d1350ad44b08_0(4c2a34fb699583664b3b7ad2162c9dcb28c8dd0340fcf011cdcd39afd1ab1bea): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.953710 4925 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-jvt9p_openshift-operators_e052dc8b-2520-4757-bb0a-d1350ad44b08_0(4c2a34fb699583664b3b7ad2162c9dcb28c8dd0340fcf011cdcd39afd1ab1bea): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:41 crc kubenswrapper[4925]: E0121 11:10:41.953869 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-jvt9p_openshift-operators(e052dc8b-2520-4757-bb0a-d1350ad44b08)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-jvt9p_openshift-operators(e052dc8b-2520-4757-bb0a-d1350ad44b08)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-jvt9p_openshift-operators_e052dc8b-2520-4757-bb0a-d1350ad44b08_0(4c2a34fb699583664b3b7ad2162c9dcb28c8dd0340fcf011cdcd39afd1ab1bea): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" podUID="e052dc8b-2520-4757-bb0a-d1350ad44b08" Jan 21 11:10:44 crc kubenswrapper[4925]: I0121 11:10:44.668997 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvr8" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="registry-server" probeResult="failure" output=< Jan 21 11:10:44 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:10:44 crc kubenswrapper[4925]: > Jan 21 11:10:50 crc kubenswrapper[4925]: I0121 11:10:50.078975 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pfrnv" Jan 21 11:10:51 crc kubenswrapper[4925]: I0121 11:10:51.501209 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:51 crc kubenswrapper[4925]: I0121 11:10:51.502127 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" Jan 21 11:10:51 crc kubenswrapper[4925]: I0121 11:10:51.501283 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:51 crc kubenswrapper[4925]: I0121 11:10:51.502764 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" Jan 21 11:10:52 crc kubenswrapper[4925]: I0121 11:10:52.351911 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf"] Jan 21 11:10:52 crc kubenswrapper[4925]: W0121 11:10:52.372662 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28aa0136_6b61_4a88_907d_265c48e36f08.slice/crio-00f5cda8c88e5363fee296e235a620cc7b5c1ff54631305feeae346889de321d WatchSource:0}: Error finding container 00f5cda8c88e5363fee296e235a620cc7b5c1ff54631305feeae346889de321d: Status 404 returned error can't find the container with id 00f5cda8c88e5363fee296e235a620cc7b5c1ff54631305feeae346889de321d Jan 21 11:10:52 crc kubenswrapper[4925]: I0121 11:10:52.434100 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml"] Jan 21 11:10:52 crc kubenswrapper[4925]: W0121 11:10:52.455700 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5331ad9e_1914_414a_a7b2_b52eb191ba2f.slice/crio-5bb2ffedf7eb7398f96626b7b628f2974c64a9d9a5b4bc26d07a1c0a538bca0a WatchSource:0}: Error finding container 5bb2ffedf7eb7398f96626b7b628f2974c64a9d9a5b4bc26d07a1c0a538bca0a: Status 404 returned error can't find the container with id 5bb2ffedf7eb7398f96626b7b628f2974c64a9d9a5b4bc26d07a1c0a538bca0a Jan 21 11:10:52 crc kubenswrapper[4925]: I0121 11:10:52.936223 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:53 crc kubenswrapper[4925]: I0121 11:10:53.122900 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:53 crc kubenswrapper[4925]: I0121 11:10:53.179199 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" event={"ID":"28aa0136-6b61-4a88-907d-265c48e36f08","Type":"ContainerStarted","Data":"00f5cda8c88e5363fee296e235a620cc7b5c1ff54631305feeae346889de321d"} Jan 21 11:10:53 crc kubenswrapper[4925]: I0121 11:10:53.181321 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" event={"ID":"5331ad9e-1914-414a-a7b2-b52eb191ba2f","Type":"ContainerStarted","Data":"5bb2ffedf7eb7398f96626b7b628f2974c64a9d9a5b4bc26d07a1c0a538bca0a"} Jan 21 11:10:55 crc kubenswrapper[4925]: I0121 11:10:55.474640 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gwvr8"] Jan 21 11:10:55 crc kubenswrapper[4925]: I0121 11:10:55.475887 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gwvr8" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="registry-server" 
containerID="cri-o://b819e953e39eb3cd05b0fb93a5bfd61ffd987610715a209467c50d20f5659b64" gracePeriod=2 Jan 21 11:10:55 crc kubenswrapper[4925]: I0121 11:10:55.509764 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:55 crc kubenswrapper[4925]: I0121 11:10:55.510096 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:55 crc kubenswrapper[4925]: I0121 11:10:55.510720 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:55 crc kubenswrapper[4925]: I0121 11:10:55.510852 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:10:55 crc kubenswrapper[4925]: I0121 11:10:55.511781 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:10:55 crc kubenswrapper[4925]: I0121 11:10:55.512300 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.270976 4925 generic.go:334] "Generic (PLEG): container finished" podID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerID="b819e953e39eb3cd05b0fb93a5bfd61ffd987610715a209467c50d20f5659b64" exitCode=0 Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.271625 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvr8" event={"ID":"6f744e39-b0b5-487d-80d8-7d8db370c838","Type":"ContainerDied","Data":"b819e953e39eb3cd05b0fb93a5bfd61ffd987610715a209467c50d20f5659b64"} Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.392550 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-jvt9p"] Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.784356 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw"] Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.812440 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-655nk"] Jan 21 11:10:56 crc kubenswrapper[4925]: W0121 11:10:56.826070 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d031a33_73a8_45d7_9979_e1266d9e7be7.slice/crio-bc1b0120b8adca6acfe5de42535045c3f3890edf14189ca7c81e84c631b6f09d WatchSource:0}: Error finding container bc1b0120b8adca6acfe5de42535045c3f3890edf14189ca7c81e84c631b6f09d: Status 404 returned error can't find the container with id bc1b0120b8adca6acfe5de42535045c3f3890edf14189ca7c81e84c631b6f09d Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.886016 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.973678 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-utilities\") pod \"6f744e39-b0b5-487d-80d8-7d8db370c838\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.973828 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjcl8\" (UniqueName: \"kubernetes.io/projected/6f744e39-b0b5-487d-80d8-7d8db370c838-kube-api-access-mjcl8\") pod \"6f744e39-b0b5-487d-80d8-7d8db370c838\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.973903 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-catalog-content\") pod \"6f744e39-b0b5-487d-80d8-7d8db370c838\" (UID: \"6f744e39-b0b5-487d-80d8-7d8db370c838\") " Jan 21 11:10:56 crc kubenswrapper[4925]: I0121 11:10:56.976165 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-utilities" (OuterVolumeSpecName: "utilities") pod "6f744e39-b0b5-487d-80d8-7d8db370c838" (UID: "6f744e39-b0b5-487d-80d8-7d8db370c838"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:56.989948 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f744e39-b0b5-487d-80d8-7d8db370c838-kube-api-access-mjcl8" (OuterVolumeSpecName: "kube-api-access-mjcl8") pod "6f744e39-b0b5-487d-80d8-7d8db370c838" (UID: "6f744e39-b0b5-487d-80d8-7d8db370c838"). InnerVolumeSpecName "kube-api-access-mjcl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.075172 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.075222 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjcl8\" (UniqueName: \"kubernetes.io/projected/6f744e39-b0b5-487d-80d8-7d8db370c838-kube-api-access-mjcl8\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.223233 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f744e39-b0b5-487d-80d8-7d8db370c838" (UID: "6f744e39-b0b5-487d-80d8-7d8db370c838"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.279014 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f744e39-b0b5-487d-80d8-7d8db370c838-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.281647 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-655nk" event={"ID":"0d031a33-73a8-45d7-9979-e1266d9e7be7","Type":"ContainerStarted","Data":"bc1b0120b8adca6acfe5de42535045c3f3890edf14189ca7c81e84c631b6f09d"} Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.283265 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" event={"ID":"ea8b2f0b-f77a-4737-be37-3268437871d9","Type":"ContainerStarted","Data":"89c1e4736e4def91c501e5e3d05be709cb90e8b25e4f748af4426fcc6a28548a"} Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.286207 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvr8" event={"ID":"6f744e39-b0b5-487d-80d8-7d8db370c838","Type":"ContainerDied","Data":"df42456ec1226ad8333294c684c6666ae292936a7c41d795f2db6c0f3ee3b57f"} Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.286265 4925 scope.go:117] "RemoveContainer" containerID="b819e953e39eb3cd05b0fb93a5bfd61ffd987610715a209467c50d20f5659b64" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.286499 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gwvr8" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.292040 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" event={"ID":"e052dc8b-2520-4757-bb0a-d1350ad44b08","Type":"ContainerStarted","Data":"a18ca0728e1aba3ecc55c1079c78e4027f36bf32566aa6ead40f60f0fde2caae"} Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.346544 4925 scope.go:117] "RemoveContainer" containerID="2496b9b10667474e68123c9b3ae1dc109d082953103a876b7d765ccc79e0d4f8" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.353805 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gwvr8"] Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.359049 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gwvr8"] Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.392416 4925 scope.go:117] "RemoveContainer" containerID="4c6e9c80cb7bf091d844892d0c649626fcf5a777331aad2b78639ddac1eab4ce" Jan 21 11:10:57 crc kubenswrapper[4925]: I0121 11:10:57.512951 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" path="/var/lib/kubelet/pods/6f744e39-b0b5-487d-80d8-7d8db370c838/volumes" Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.844490 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" event={"ID":"ea8b2f0b-f77a-4737-be37-3268437871d9","Type":"ContainerStarted","Data":"fa422fefcd720798334d86f67f918ba82058f8806a8eb99d7acc3d8d50fa594d"} Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.862810 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" 
event={"ID":"e052dc8b-2520-4757-bb0a-d1350ad44b08","Type":"ContainerStarted","Data":"69598f6729e826b0cb416fce6cbe12ec7bf55b49a0d63be070a188bfa29442f3"} Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.863637 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.864889 4925 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-jvt9p container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.17:8081/healthz\": dial tcp 10.217.0.17:8081: connect: connection refused" start-of-body= Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.864980 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" podUID="e052dc8b-2520-4757-bb0a-d1350ad44b08" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.17:8081/healthz\": dial tcp 10.217.0.17:8081: connect: connection refused" Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.875474 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-655nk" event={"ID":"0d031a33-73a8-45d7-9979-e1266d9e7be7","Type":"ContainerStarted","Data":"2bcd34107f6af89575ed8061cf48fbcbbc3ed0cdbcb469b949db6dad2ffe2c05"} Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.875905 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.882470 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-l96bw" podStartSLOduration=20.268411857 podStartE2EDuration="33.882428696s" podCreationTimestamp="2026-01-21 11:10:38 +0000 UTC" firstStartedPulling="2026-01-21 11:10:56.824798874 +0000 UTC m=+948.428690808" lastFinishedPulling="2026-01-21 11:11:10.438815713 +0000 UTC m=+962.042707647" observedRunningTime="2026-01-21 11:11:11.876939283 +0000 UTC m=+963.480831217" watchObservedRunningTime="2026-01-21 11:11:11.882428696 +0000 UTC m=+963.486320630" Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.935204 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" podStartSLOduration=18.976461032 podStartE2EDuration="32.935182147s" podCreationTimestamp="2026-01-21 11:10:39 +0000 UTC" firstStartedPulling="2026-01-21 11:10:56.484379603 +0000 UTC m=+948.088271537" lastFinishedPulling="2026-01-21 11:11:10.443100708 +0000 UTC m=+962.046992652" observedRunningTime="2026-01-21 11:11:11.929169638 +0000 UTC m=+963.533061592" watchObservedRunningTime="2026-01-21 11:11:11.935182147 +0000 UTC m=+963.539074081" Jan 21 11:11:11 crc kubenswrapper[4925]: I0121 11:11:11.980819 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-655nk" podStartSLOduration=18.35459259 podStartE2EDuration="31.980783661s" podCreationTimestamp="2026-01-21 11:10:40 +0000 UTC" firstStartedPulling="2026-01-21 11:10:56.839497127 +0000 UTC m=+948.443389061" lastFinishedPulling="2026-01-21 11:11:10.465688198 +0000 UTC m=+962.069580132" observedRunningTime="2026-01-21 11:11:11.964820429 +0000 UTC m=+963.568712363" watchObservedRunningTime="2026-01-21 11:11:11.980783661 +0000 UTC m=+963.584675595" Jan 21 
11:11:12 crc kubenswrapper[4925]: I0121 11:11:12.884233 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" event={"ID":"28aa0136-6b61-4a88-907d-265c48e36f08","Type":"ContainerStarted","Data":"e358c2dfe64b6432048eb7313dced7331eac9e032b701a9d705139ef2fbbc65d"} Jan 21 11:11:12 crc kubenswrapper[4925]: I0121 11:11:12.889783 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" event={"ID":"5331ad9e-1914-414a-a7b2-b52eb191ba2f","Type":"ContainerStarted","Data":"bf8b4db0d18fec71db2a74636a1f0643c4fea4ece65e8c6e0220570d9198a249"} Jan 21 11:11:12 crc kubenswrapper[4925]: I0121 11:11:12.904035 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf" podStartSLOduration=16.926484089 podStartE2EDuration="34.904006736s" podCreationTimestamp="2026-01-21 11:10:38 +0000 UTC" firstStartedPulling="2026-01-21 11:10:52.377497349 +0000 UTC m=+943.981389283" lastFinishedPulling="2026-01-21 11:11:10.355019996 +0000 UTC m=+961.958911930" observedRunningTime="2026-01-21 11:11:12.903699127 +0000 UTC m=+964.507591081" watchObservedRunningTime="2026-01-21 11:11:12.904006736 +0000 UTC m=+964.507898670" Jan 21 11:11:12 crc kubenswrapper[4925]: I0121 11:11:12.926461 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-x48ml" podStartSLOduration=16.953633634 podStartE2EDuration="34.926438722s" podCreationTimestamp="2026-01-21 11:10:38 +0000 UTC" firstStartedPulling="2026-01-21 11:10:52.466634604 +0000 UTC m=+944.070526538" lastFinishedPulling="2026-01-21 11:11:10.439439692 +0000 UTC m=+962.043331626" observedRunningTime="2026-01-21 11:11:12.924701507 +0000 UTC m=+964.528593451" watchObservedRunningTime="2026-01-21 11:11:12.926438722 +0000 UTC m=+964.530330656" Jan 21 11:11:12 crc kubenswrapper[4925]: I0121 11:11:12.938848 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-jvt9p" Jan 21 11:11:21 crc kubenswrapper[4925]: I0121 11:11:21.086310 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-655nk" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.296950 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg"] Jan 21 11:11:23 crc kubenswrapper[4925]: E0121 11:11:23.297315 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="registry-server" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.297348 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="registry-server" Jan 21 11:11:23 crc kubenswrapper[4925]: E0121 11:11:23.297420 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="extract-utilities" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.297432 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="extract-utilities" Jan 21 11:11:23 crc kubenswrapper[4925]: E0121 11:11:23.297458 4925 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="extract-content" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.297470 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="extract-content" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.297634 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f744e39-b0b5-487d-80d8-7d8db370c838" containerName="registry-server" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.298898 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.302958 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.317826 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg"] Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.326819 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.327059 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.327156 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tt6p\" (UniqueName: \"kubernetes.io/projected/e058b309-ed1b-4162-a8e1-adf175ab47cf-kube-api-access-4tt6p\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.428272 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.428363 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tt6p\" (UniqueName: \"kubernetes.io/projected/e058b309-ed1b-4162-a8e1-adf175ab47cf-kube-api-access-4tt6p\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.428455 4925 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.429321 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.429371 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.455334 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tt6p\" (UniqueName: \"kubernetes.io/projected/e058b309-ed1b-4162-a8e1-adf175ab47cf-kube-api-access-4tt6p\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:23 crc kubenswrapper[4925]: I0121 11:11:23.625231 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:24 crc kubenswrapper[4925]: I0121 11:11:24.189602 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg"] Jan 21 11:11:24 crc kubenswrapper[4925]: W0121 11:11:24.199972 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode058b309_ed1b_4162_a8e1_adf175ab47cf.slice/crio-f5525dd9da1f86d1ab45a700959a09dce511971168bdfe82e0a0c4a4776d9309 WatchSource:0}: Error finding container f5525dd9da1f86d1ab45a700959a09dce511971168bdfe82e0a0c4a4776d9309: Status 404 returned error can't find the container with id f5525dd9da1f86d1ab45a700959a09dce511971168bdfe82e0a0c4a4776d9309 Jan 21 11:11:25 crc kubenswrapper[4925]: I0121 11:11:25.149594 4925 generic.go:334] "Generic (PLEG): container finished" podID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerID="92ebf54582cfb228a7612cbb81e60776695b659538c924011c29609b09d948a4" exitCode=0 Jan 21 11:11:25 crc kubenswrapper[4925]: I0121 11:11:25.150111 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" event={"ID":"e058b309-ed1b-4162-a8e1-adf175ab47cf","Type":"ContainerDied","Data":"92ebf54582cfb228a7612cbb81e60776695b659538c924011c29609b09d948a4"} Jan 21 11:11:25 crc kubenswrapper[4925]: I0121 11:11:25.150188 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" event={"ID":"e058b309-ed1b-4162-a8e1-adf175ab47cf","Type":"ContainerStarted","Data":"f5525dd9da1f86d1ab45a700959a09dce511971168bdfe82e0a0c4a4776d9309"} Jan 21 11:11:27 crc kubenswrapper[4925]: I0121 11:11:27.167197 4925 generic.go:334] "Generic (PLEG): container finished" podID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerID="4dc5076dc3d81ca8f3d0998aacdfeb8c357c1c5ab2dead452d62eef11e6b8dc3" exitCode=0 Jan 21 11:11:27 crc kubenswrapper[4925]: I0121 11:11:27.167436 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" event={"ID":"e058b309-ed1b-4162-a8e1-adf175ab47cf","Type":"ContainerDied","Data":"4dc5076dc3d81ca8f3d0998aacdfeb8c357c1c5ab2dead452d62eef11e6b8dc3"} Jan 21 11:11:28 crc kubenswrapper[4925]: I0121 11:11:28.177992 4925 generic.go:334] "Generic (PLEG): container finished" podID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerID="2a996e0d113393d80b7e66f5b96cb2719f9391ae7ad51ec663b7fb50c9b66cef" exitCode=0 Jan 21 11:11:28 crc kubenswrapper[4925]: I0121 11:11:28.178051 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" event={"ID":"e058b309-ed1b-4162-a8e1-adf175ab47cf","Type":"ContainerDied","Data":"2a996e0d113393d80b7e66f5b96cb2719f9391ae7ad51ec663b7fb50c9b66cef"} Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.499953 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.515464 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-bundle\") pod \"e058b309-ed1b-4162-a8e1-adf175ab47cf\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.515541 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tt6p\" (UniqueName: \"kubernetes.io/projected/e058b309-ed1b-4162-a8e1-adf175ab47cf-kube-api-access-4tt6p\") pod \"e058b309-ed1b-4162-a8e1-adf175ab47cf\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.515639 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-util\") pod \"e058b309-ed1b-4162-a8e1-adf175ab47cf\" (UID: \"e058b309-ed1b-4162-a8e1-adf175ab47cf\") " Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.516251 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-bundle" (OuterVolumeSpecName: "bundle") pod "e058b309-ed1b-4162-a8e1-adf175ab47cf" (UID: "e058b309-ed1b-4162-a8e1-adf175ab47cf"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.523917 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e058b309-ed1b-4162-a8e1-adf175ab47cf-kube-api-access-4tt6p" (OuterVolumeSpecName: "kube-api-access-4tt6p") pod "e058b309-ed1b-4162-a8e1-adf175ab47cf" (UID: "e058b309-ed1b-4162-a8e1-adf175ab47cf"). InnerVolumeSpecName "kube-api-access-4tt6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.535631 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-util" (OuterVolumeSpecName: "util") pod "e058b309-ed1b-4162-a8e1-adf175ab47cf" (UID: "e058b309-ed1b-4162-a8e1-adf175ab47cf"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.617238 4925 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.617333 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tt6p\" (UniqueName: \"kubernetes.io/projected/e058b309-ed1b-4162-a8e1-adf175ab47cf-kube-api-access-4tt6p\") on node \"crc\" DevicePath \"\"" Jan 21 11:11:29 crc kubenswrapper[4925]: I0121 11:11:29.617371 4925 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e058b309-ed1b-4162-a8e1-adf175ab47cf-util\") on node \"crc\" DevicePath \"\"" Jan 21 11:11:30 crc kubenswrapper[4925]: I0121 11:11:30.198001 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" event={"ID":"e058b309-ed1b-4162-a8e1-adf175ab47cf","Type":"ContainerDied","Data":"f5525dd9da1f86d1ab45a700959a09dce511971168bdfe82e0a0c4a4776d9309"} Jan 21 11:11:30 crc kubenswrapper[4925]: I0121 11:11:30.198500 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5525dd9da1f86d1ab45a700959a09dce511971168bdfe82e0a0c4a4776d9309" Jan 21 11:11:30 crc kubenswrapper[4925]: I0121 11:11:30.198247 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.254770 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-8kzgs"] Jan 21 11:11:32 crc kubenswrapper[4925]: E0121 11:11:32.255090 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerName="pull" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.255109 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerName="pull" Jan 21 11:11:32 crc kubenswrapper[4925]: E0121 11:11:32.255129 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerName="extract" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.255139 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerName="extract" Jan 21 11:11:32 crc kubenswrapper[4925]: E0121 11:11:32.255153 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerName="util" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.255161 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerName="util" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.255326 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="e058b309-ed1b-4162-a8e1-adf175ab47cf" containerName="extract" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.255927 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-8kzgs" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.258679 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.258732 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.259209 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-8tcrk" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.271868 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-8kzgs"] Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.377029 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9kl5\" (UniqueName: \"kubernetes.io/projected/88c3cb65-2aff-44db-85fb-8c365c93439f-kube-api-access-v9kl5\") pod \"nmstate-operator-646758c888-8kzgs\" (UID: \"88c3cb65-2aff-44db-85fb-8c365c93439f\") " pod="openshift-nmstate/nmstate-operator-646758c888-8kzgs" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.478679 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9kl5\" (UniqueName: \"kubernetes.io/projected/88c3cb65-2aff-44db-85fb-8c365c93439f-kube-api-access-v9kl5\") pod \"nmstate-operator-646758c888-8kzgs\" (UID: \"88c3cb65-2aff-44db-85fb-8c365c93439f\") " pod="openshift-nmstate/nmstate-operator-646758c888-8kzgs" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.503463 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9kl5\" (UniqueName: \"kubernetes.io/projected/88c3cb65-2aff-44db-85fb-8c365c93439f-kube-api-access-v9kl5\") pod \"nmstate-operator-646758c888-8kzgs\" (UID: \"88c3cb65-2aff-44db-85fb-8c365c93439f\") " pod="openshift-nmstate/nmstate-operator-646758c888-8kzgs" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.578232 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-8kzgs" Jan 21 11:11:32 crc kubenswrapper[4925]: I0121 11:11:32.943568 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-8kzgs"] Jan 21 11:11:33 crc kubenswrapper[4925]: I0121 11:11:33.218244 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-8kzgs" event={"ID":"88c3cb65-2aff-44db-85fb-8c365c93439f","Type":"ContainerStarted","Data":"4d8367b59a70bc7fc60b8767f4d296564cdd363b3936071302842d51741248e9"} Jan 21 11:11:39 crc kubenswrapper[4925]: I0121 11:11:39.267490 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-8kzgs" event={"ID":"88c3cb65-2aff-44db-85fb-8c365c93439f","Type":"ContainerStarted","Data":"a51a081a506cc1981863dd35be09fc338266c950599437487f52cb13dc8153cd"} Jan 21 11:11:39 crc kubenswrapper[4925]: I0121 11:11:39.292286 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-8kzgs" podStartSLOduration=1.9423546950000001 podStartE2EDuration="7.292264523s" podCreationTimestamp="2026-01-21 11:11:32 +0000 UTC" firstStartedPulling="2026-01-21 11:11:32.955409962 +0000 UTC m=+984.559301896" lastFinishedPulling="2026-01-21 11:11:38.30531979 +0000 UTC m=+989.909211724" observedRunningTime="2026-01-21 11:11:39.288671189 +0000 UTC m=+990.892563143" watchObservedRunningTime="2026-01-21 11:11:39.292264523 +0000 UTC m=+990.896156457" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.313892 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.315949 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.320060 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-c5llf"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.320639 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.321044 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-6stc9" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.321187 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.343304 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.389376 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-j7spk"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.394963 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.396616 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-c5llf"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.447855 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-ovs-socket\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.447953 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-nmstate-lock\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.447981 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-dbus-socket\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.448007 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgf8m\" (UniqueName: \"kubernetes.io/projected/3c739657-6960-46fe-be71-6d965b98e714-kube-api-access-zgf8m\") pod \"nmstate-webhook-8474b5b9d8-87q5g\" (UID: \"3c739657-6960-46fe-be71-6d965b98e714\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.448256 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7drp\" (UniqueName: \"kubernetes.io/projected/e125e9a3-31e1-47ce-ab99-c65ace2a60ec-kube-api-access-g7drp\") pod \"nmstate-metrics-54757c584b-c5llf\" (UID: \"e125e9a3-31e1-47ce-ab99-c65ace2a60ec\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.448351 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3c739657-6960-46fe-be71-6d965b98e714-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-87q5g\" (UID: \"3c739657-6960-46fe-be71-6d965b98e714\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.448434 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcz6k\" (UniqueName: \"kubernetes.io/projected/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-kube-api-access-bcz6k\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.480732 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.481834 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.491163 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.491448 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-s7qmj" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.491621 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.536513 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549594 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7drp\" (UniqueName: \"kubernetes.io/projected/e125e9a3-31e1-47ce-ab99-c65ace2a60ec-kube-api-access-g7drp\") pod \"nmstate-metrics-54757c584b-c5llf\" (UID: \"e125e9a3-31e1-47ce-ab99-c65ace2a60ec\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549660 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3c739657-6960-46fe-be71-6d965b98e714-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-87q5g\" (UID: \"3c739657-6960-46fe-be71-6d965b98e714\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549698 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcz6k\" (UniqueName: \"kubernetes.io/projected/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-kube-api-access-bcz6k\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549723 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8mm7\" (UniqueName: \"kubernetes.io/projected/eac14392-e11f-4bf9-b1db-d6200c0d0821-kube-api-access-v8mm7\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: \"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549769 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-ovs-socket\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549808 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-nmstate-lock\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549826 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/eac14392-e11f-4bf9-b1db-d6200c0d0821-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: 
\"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549849 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-dbus-socket\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549867 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/eac14392-e11f-4bf9-b1db-d6200c0d0821-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: \"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.549890 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgf8m\" (UniqueName: \"kubernetes.io/projected/3c739657-6960-46fe-be71-6d965b98e714-kube-api-access-zgf8m\") pod \"nmstate-webhook-8474b5b9d8-87q5g\" (UID: \"3c739657-6960-46fe-be71-6d965b98e714\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:46 crc kubenswrapper[4925]: E0121 11:11:46.549852 4925 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 21 11:11:46 crc kubenswrapper[4925]: E0121 11:11:46.550045 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c739657-6960-46fe-be71-6d965b98e714-tls-key-pair podName:3c739657-6960-46fe-be71-6d965b98e714 nodeName:}" failed. No retries permitted until 2026-01-21 11:11:47.04999282 +0000 UTC m=+998.653884754 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/3c739657-6960-46fe-be71-6d965b98e714-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-87q5g" (UID: "3c739657-6960-46fe-be71-6d965b98e714") : secret "openshift-nmstate-webhook" not found Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.550075 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-ovs-socket\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.550046 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-nmstate-lock\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.550468 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-dbus-socket\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.571104 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7drp\" (UniqueName: \"kubernetes.io/projected/e125e9a3-31e1-47ce-ab99-c65ace2a60ec-kube-api-access-g7drp\") pod \"nmstate-metrics-54757c584b-c5llf\" (UID: \"e125e9a3-31e1-47ce-ab99-c65ace2a60ec\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.571123 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgf8m\" (UniqueName: \"kubernetes.io/projected/3c739657-6960-46fe-be71-6d965b98e714-kube-api-access-zgf8m\") pod \"nmstate-webhook-8474b5b9d8-87q5g\" (UID: \"3c739657-6960-46fe-be71-6d965b98e714\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.572072 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcz6k\" (UniqueName: \"kubernetes.io/projected/a3532c91-ec04-4a0c-99d3-bf6bf96a8887-kube-api-access-bcz6k\") pod \"nmstate-handler-j7spk\" (UID: \"a3532c91-ec04-4a0c-99d3-bf6bf96a8887\") " pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.651385 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8mm7\" (UniqueName: \"kubernetes.io/projected/eac14392-e11f-4bf9-b1db-d6200c0d0821-kube-api-access-v8mm7\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: \"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.651910 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/eac14392-e11f-4bf9-b1db-d6200c0d0821-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: \"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.651938 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/eac14392-e11f-4bf9-b1db-d6200c0d0821-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: \"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.653382 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/eac14392-e11f-4bf9-b1db-d6200c0d0821-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: \"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.653787 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.658656 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/eac14392-e11f-4bf9-b1db-d6200c0d0821-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: \"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.671093 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-ddcb56777-sqrzz"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.671958 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.701324 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-ddcb56777-sqrzz"] Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.701543 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8mm7\" (UniqueName: \"kubernetes.io/projected/eac14392-e11f-4bf9-b1db-d6200c0d0821-kube-api-access-v8mm7\") pod \"nmstate-console-plugin-7754f76f8b-58tbd\" (UID: \"eac14392-e11f-4bf9-b1db-d6200c0d0821\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.720973 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.752740 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkqsw\" (UniqueName: \"kubernetes.io/projected/09c16233-6644-4a24-96d8-69d72c8c921d-kube-api-access-pkqsw\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.752815 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-serving-cert\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.752998 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-service-ca\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.753030 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-trusted-ca-bundle\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.753046 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-oauth-config\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.753104 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-oauth-serving-cert\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.753132 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-console-config\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: W0121 11:11:46.772468 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3532c91_ec04_4a0c_99d3_bf6bf96a8887.slice/crio-dd8b5c12d96311a793f9d21c2dbde13e0e115ef68d647e8c7d8e14b238405a19 WatchSource:0}: Error finding container dd8b5c12d96311a793f9d21c2dbde13e0e115ef68d647e8c7d8e14b238405a19: Status 404 returned error can't find the container with id dd8b5c12d96311a793f9d21c2dbde13e0e115ef68d647e8c7d8e14b238405a19 Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.807910 
4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.885456 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkqsw\" (UniqueName: \"kubernetes.io/projected/09c16233-6644-4a24-96d8-69d72c8c921d-kube-api-access-pkqsw\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.885509 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-serving-cert\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.885539 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-service-ca\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.885575 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-trusted-ca-bundle\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.885594 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-oauth-config\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.885640 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-oauth-serving-cert\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.885662 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-console-config\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.887022 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-console-config\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.887211 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-service-ca\") pod \"console-ddcb56777-sqrzz\" (UID: 
\"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.888519 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-trusted-ca-bundle\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.891919 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-serving-cert\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.892708 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-oauth-serving-cert\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.893324 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-oauth-config\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:46 crc kubenswrapper[4925]: I0121 11:11:46.909300 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkqsw\" (UniqueName: \"kubernetes.io/projected/09c16233-6644-4a24-96d8-69d72c8c921d-kube-api-access-pkqsw\") pod \"console-ddcb56777-sqrzz\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.061016 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.065640 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-c5llf"] Jan 21 11:11:47 crc kubenswrapper[4925]: W0121 11:11:47.079356 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode125e9a3_31e1_47ce_ab99_c65ace2a60ec.slice/crio-d90c11cb2c1c7c1213a25e26f3dbad9ecfff3bf586bcc5cecc6bbb6b43701dc6 WatchSource:0}: Error finding container d90c11cb2c1c7c1213a25e26f3dbad9ecfff3bf586bcc5cecc6bbb6b43701dc6: Status 404 returned error can't find the container with id d90c11cb2c1c7c1213a25e26f3dbad9ecfff3bf586bcc5cecc6bbb6b43701dc6 Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.090936 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3c739657-6960-46fe-be71-6d965b98e714-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-87q5g\" (UID: \"3c739657-6960-46fe-be71-6d965b98e714\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.097233 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3c739657-6960-46fe-be71-6d965b98e714-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-87q5g\" (UID: \"3c739657-6960-46fe-be71-6d965b98e714\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.245272 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.281988 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-ddcb56777-sqrzz"] Jan 21 11:11:47 crc kubenswrapper[4925]: W0121 11:11:47.291634 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09c16233_6644_4a24_96d8_69d72c8c921d.slice/crio-a5b1a3fb9aa961f3ed9e7e1981adf07a1d724d6c9f7cd1f2df85c7413edffd56 WatchSource:0}: Error finding container a5b1a3fb9aa961f3ed9e7e1981adf07a1d724d6c9f7cd1f2df85c7413edffd56: Status 404 returned error can't find the container with id a5b1a3fb9aa961f3ed9e7e1981adf07a1d724d6c9f7cd1f2df85c7413edffd56 Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.340214 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" event={"ID":"e125e9a3-31e1-47ce-ab99-c65ace2a60ec","Type":"ContainerStarted","Data":"d90c11cb2c1c7c1213a25e26f3dbad9ecfff3bf586bcc5cecc6bbb6b43701dc6"} Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.345262 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-j7spk" event={"ID":"a3532c91-ec04-4a0c-99d3-bf6bf96a8887","Type":"ContainerStarted","Data":"dd8b5c12d96311a793f9d21c2dbde13e0e115ef68d647e8c7d8e14b238405a19"} Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.349947 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd"] Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.351435 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-ddcb56777-sqrzz" 
event={"ID":"09c16233-6644-4a24-96d8-69d72c8c921d","Type":"ContainerStarted","Data":"a5b1a3fb9aa961f3ed9e7e1981adf07a1d724d6c9f7cd1f2df85c7413edffd56"} Jan 21 11:11:47 crc kubenswrapper[4925]: W0121 11:11:47.364542 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeac14392_e11f_4bf9_b1db_d6200c0d0821.slice/crio-5c0c600c0edc99dab7133fb7c4671b50cd2f337bf70789b7560e7cbf946678d9 WatchSource:0}: Error finding container 5c0c600c0edc99dab7133fb7c4671b50cd2f337bf70789b7560e7cbf946678d9: Status 404 returned error can't find the container with id 5c0c600c0edc99dab7133fb7c4671b50cd2f337bf70789b7560e7cbf946678d9 Jan 21 11:11:47 crc kubenswrapper[4925]: W0121 11:11:47.508819 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c739657_6960_46fe_be71_6d965b98e714.slice/crio-48514a2830f240a0ee8d36bb1570be714846c144d5228d8983b5d48bad0d5717 WatchSource:0}: Error finding container 48514a2830f240a0ee8d36bb1570be714846c144d5228d8983b5d48bad0d5717: Status 404 returned error can't find the container with id 48514a2830f240a0ee8d36bb1570be714846c144d5228d8983b5d48bad0d5717 Jan 21 11:11:47 crc kubenswrapper[4925]: I0121 11:11:47.520449 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g"] Jan 21 11:11:48 crc kubenswrapper[4925]: I0121 11:11:48.360786 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" event={"ID":"3c739657-6960-46fe-be71-6d965b98e714","Type":"ContainerStarted","Data":"48514a2830f240a0ee8d36bb1570be714846c144d5228d8983b5d48bad0d5717"} Jan 21 11:11:48 crc kubenswrapper[4925]: I0121 11:11:48.363228 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-ddcb56777-sqrzz" event={"ID":"09c16233-6644-4a24-96d8-69d72c8c921d","Type":"ContainerStarted","Data":"7299e6e690be0b64c48b8220ff680a3eddeb29b3f6ed78a0ff31f197ef77e368"} Jan 21 11:11:48 crc kubenswrapper[4925]: I0121 11:11:48.365843 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" event={"ID":"eac14392-e11f-4bf9-b1db-d6200c0d0821","Type":"ContainerStarted","Data":"5c0c600c0edc99dab7133fb7c4671b50cd2f337bf70789b7560e7cbf946678d9"} Jan 21 11:11:48 crc kubenswrapper[4925]: I0121 11:11:48.388076 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-ddcb56777-sqrzz" podStartSLOduration=2.388033514 podStartE2EDuration="2.388033514s" podCreationTimestamp="2026-01-21 11:11:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:11:48.385618067 +0000 UTC m=+999.989510021" watchObservedRunningTime="2026-01-21 11:11:48.388033514 +0000 UTC m=+999.991925448" Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.419501 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" event={"ID":"e125e9a3-31e1-47ce-ab99-c65ace2a60ec","Type":"ContainerStarted","Data":"b039e769f8d4380ad2ce635a4a9ff9d29159a65f1d90a57e6dfb0804a247fa2b"} Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.421314 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" 
event={"ID":"3c739657-6960-46fe-be71-6d965b98e714","Type":"ContainerStarted","Data":"bd54c0d507ce85bcc0a8c3b4256d53107c85bc3616443b1d1087bee05f760266"} Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.421730 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.427159 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-j7spk" event={"ID":"a3532c91-ec04-4a0c-99d3-bf6bf96a8887","Type":"ContainerStarted","Data":"f2ae56ccf37c8e54c807db9e60752aaa73fb71c06b259d40a839f3265ab4690c"} Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.427286 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.429701 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" event={"ID":"eac14392-e11f-4bf9-b1db-d6200c0d0821","Type":"ContainerStarted","Data":"b0aed6b191a8aea3be0c4a1daf9b9104f610a254c6a8f71234762f5d43afeca5"} Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.452485 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" podStartSLOduration=2.642383686 podStartE2EDuration="7.452457511s" podCreationTimestamp="2026-01-21 11:11:46 +0000 UTC" firstStartedPulling="2026-01-21 11:11:47.514129639 +0000 UTC m=+999.118021573" lastFinishedPulling="2026-01-21 11:11:52.324203464 +0000 UTC m=+1003.928095398" observedRunningTime="2026-01-21 11:11:53.441317177 +0000 UTC m=+1005.045209111" watchObservedRunningTime="2026-01-21 11:11:53.452457511 +0000 UTC m=+1005.056349455" Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.477253 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-j7spk" podStartSLOduration=1.934116798 podStartE2EDuration="7.477230967s" podCreationTimestamp="2026-01-21 11:11:46 +0000 UTC" firstStartedPulling="2026-01-21 11:11:46.778755221 +0000 UTC m=+998.382647155" lastFinishedPulling="2026-01-21 11:11:52.32186938 +0000 UTC m=+1003.925761324" observedRunningTime="2026-01-21 11:11:53.466541808 +0000 UTC m=+1005.070433762" watchObservedRunningTime="2026-01-21 11:11:53.477230967 +0000 UTC m=+1005.081123071" Jan 21 11:11:53 crc kubenswrapper[4925]: I0121 11:11:53.505734 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-58tbd" podStartSLOduration=2.556948254 podStartE2EDuration="7.505706761s" podCreationTimestamp="2026-01-21 11:11:46 +0000 UTC" firstStartedPulling="2026-01-21 11:11:47.367159494 +0000 UTC m=+998.971051428" lastFinishedPulling="2026-01-21 11:11:52.315918001 +0000 UTC m=+1003.919809935" observedRunningTime="2026-01-21 11:11:53.496634623 +0000 UTC m=+1005.100526557" watchObservedRunningTime="2026-01-21 11:11:53.505706761 +0000 UTC m=+1005.109598685" Jan 21 11:11:57 crc kubenswrapper[4925]: I0121 11:11:57.062320 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:57 crc kubenswrapper[4925]: I0121 11:11:57.063880 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:57 crc kubenswrapper[4925]: I0121 11:11:57.067152 4925 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:57 crc kubenswrapper[4925]: I0121 11:11:57.465690 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:11:57 crc kubenswrapper[4925]: I0121 11:11:57.597147 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-7lrsj"] Jan 21 11:11:58 crc kubenswrapper[4925]: I0121 11:11:58.472730 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" event={"ID":"e125e9a3-31e1-47ce-ab99-c65ace2a60ec","Type":"ContainerStarted","Data":"ceb2cef9a902728123a38e114d547e0632a83ad4ce67fb49d67fdc5c1e04038c"} Jan 21 11:11:58 crc kubenswrapper[4925]: I0121 11:11:58.506907 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-c5llf" podStartSLOduration=1.956639181 podStartE2EDuration="12.50687432s" podCreationTimestamp="2026-01-21 11:11:46 +0000 UTC" firstStartedPulling="2026-01-21 11:11:47.083968316 +0000 UTC m=+998.687860250" lastFinishedPulling="2026-01-21 11:11:57.634203465 +0000 UTC m=+1009.238095389" observedRunningTime="2026-01-21 11:11:58.500717815 +0000 UTC m=+1010.104609769" watchObservedRunningTime="2026-01-21 11:11:58.50687432 +0000 UTC m=+1010.110766244" Jan 21 11:11:59 crc kubenswrapper[4925]: I0121 11:11:59.807778 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k78s8"] Jan 21 11:11:59 crc kubenswrapper[4925]: I0121 11:11:59.809418 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:11:59 crc kubenswrapper[4925]: I0121 11:11:59.826507 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k78s8"] Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.001024 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-catalog-content\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.001523 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrh2g\" (UniqueName: \"kubernetes.io/projected/d8204e9e-556b-4f34-be42-cc7ace8df9ce-kube-api-access-rrh2g\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.001672 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-utilities\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.102778 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-catalog-content\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " 
pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.102877 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrh2g\" (UniqueName: \"kubernetes.io/projected/d8204e9e-556b-4f34-be42-cc7ace8df9ce-kube-api-access-rrh2g\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.102917 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-utilities\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.103750 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-catalog-content\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.103807 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-utilities\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.126304 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrh2g\" (UniqueName: \"kubernetes.io/projected/d8204e9e-556b-4f34-be42-cc7ace8df9ce-kube-api-access-rrh2g\") pod \"community-operators-k78s8\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:00 crc kubenswrapper[4925]: I0121 11:12:00.203200 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:01 crc kubenswrapper[4925]: I0121 11:12:01.141486 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k78s8"] Jan 21 11:12:01 crc kubenswrapper[4925]: I0121 11:12:01.510995 4925 generic.go:334] "Generic (PLEG): container finished" podID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerID="153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9" exitCode=0 Jan 21 11:12:01 crc kubenswrapper[4925]: I0121 11:12:01.531886 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k78s8" event={"ID":"d8204e9e-556b-4f34-be42-cc7ace8df9ce","Type":"ContainerDied","Data":"153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9"} Jan 21 11:12:01 crc kubenswrapper[4925]: I0121 11:12:01.532573 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k78s8" event={"ID":"d8204e9e-556b-4f34-be42-cc7ace8df9ce","Type":"ContainerStarted","Data":"a7769768b9342cb346f0f599c284d5781a774a89efc6abbe85c651563f53ffa2"} Jan 21 11:12:01 crc kubenswrapper[4925]: I0121 11:12:01.752759 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-j7spk" Jan 21 11:12:03 crc kubenswrapper[4925]: I0121 11:12:03.527886 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k78s8" event={"ID":"d8204e9e-556b-4f34-be42-cc7ace8df9ce","Type":"ContainerStarted","Data":"e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb"} Jan 21 11:12:04 crc kubenswrapper[4925]: I0121 11:12:04.538192 4925 generic.go:334] "Generic (PLEG): container finished" podID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerID="e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb" exitCode=0 Jan 21 11:12:04 crc kubenswrapper[4925]: I0121 11:12:04.538289 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k78s8" event={"ID":"d8204e9e-556b-4f34-be42-cc7ace8df9ce","Type":"ContainerDied","Data":"e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb"} Jan 21 11:12:05 crc kubenswrapper[4925]: I0121 11:12:05.547743 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k78s8" event={"ID":"d8204e9e-556b-4f34-be42-cc7ace8df9ce","Type":"ContainerStarted","Data":"6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7"} Jan 21 11:12:05 crc kubenswrapper[4925]: I0121 11:12:05.574865 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k78s8" podStartSLOduration=3.093098208 podStartE2EDuration="6.574832297s" podCreationTimestamp="2026-01-21 11:11:59 +0000 UTC" firstStartedPulling="2026-01-21 11:12:01.515523906 +0000 UTC m=+1013.119415840" lastFinishedPulling="2026-01-21 11:12:04.997257995 +0000 UTC m=+1016.601149929" observedRunningTime="2026-01-21 11:12:05.571342676 +0000 UTC m=+1017.175234620" watchObservedRunningTime="2026-01-21 11:12:05.574832297 +0000 UTC m=+1017.178724231" Jan 21 11:12:07 crc kubenswrapper[4925]: I0121 11:12:07.253599 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-87q5g" Jan 21 11:12:10 crc kubenswrapper[4925]: I0121 11:12:10.204293 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:10 crc kubenswrapper[4925]: I0121 11:12:10.204893 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:10 crc kubenswrapper[4925]: I0121 11:12:10.257342 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:10 crc kubenswrapper[4925]: I0121 11:12:10.828528 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:10 crc kubenswrapper[4925]: I0121 11:12:10.872008 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k78s8"] Jan 21 11:12:12 crc kubenswrapper[4925]: I0121 11:12:12.599952 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-k78s8" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerName="registry-server" containerID="cri-o://6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7" gracePeriod=2 Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.050297 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.175223 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-catalog-content\") pod \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.175665 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-utilities\") pod \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.175789 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrh2g\" (UniqueName: \"kubernetes.io/projected/d8204e9e-556b-4f34-be42-cc7ace8df9ce-kube-api-access-rrh2g\") pod \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\" (UID: \"d8204e9e-556b-4f34-be42-cc7ace8df9ce\") " Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.177008 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-utilities" (OuterVolumeSpecName: "utilities") pod "d8204e9e-556b-4f34-be42-cc7ace8df9ce" (UID: "d8204e9e-556b-4f34-be42-cc7ace8df9ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.191094 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8204e9e-556b-4f34-be42-cc7ace8df9ce-kube-api-access-rrh2g" (OuterVolumeSpecName: "kube-api-access-rrh2g") pod "d8204e9e-556b-4f34-be42-cc7ace8df9ce" (UID: "d8204e9e-556b-4f34-be42-cc7ace8df9ce"). InnerVolumeSpecName "kube-api-access-rrh2g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.237139 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8204e9e-556b-4f34-be42-cc7ace8df9ce" (UID: "d8204e9e-556b-4f34-be42-cc7ace8df9ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.278243 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.278328 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrh2g\" (UniqueName: \"kubernetes.io/projected/d8204e9e-556b-4f34-be42-cc7ace8df9ce-kube-api-access-rrh2g\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.278346 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8204e9e-556b-4f34-be42-cc7ace8df9ce-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.622971 4925 generic.go:334] "Generic (PLEG): container finished" podID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerID="6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7" exitCode=0 Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.623078 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k78s8" event={"ID":"d8204e9e-556b-4f34-be42-cc7ace8df9ce","Type":"ContainerDied","Data":"6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7"} Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.623575 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k78s8" event={"ID":"d8204e9e-556b-4f34-be42-cc7ace8df9ce","Type":"ContainerDied","Data":"a7769768b9342cb346f0f599c284d5781a774a89efc6abbe85c651563f53ffa2"} Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.623672 4925 scope.go:117] "RemoveContainer" containerID="6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.623113 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k78s8" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.662954 4925 scope.go:117] "RemoveContainer" containerID="e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.663272 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k78s8"] Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.676129 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-k78s8"] Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.795580 4925 scope.go:117] "RemoveContainer" containerID="153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.864751 4925 scope.go:117] "RemoveContainer" containerID="6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7" Jan 21 11:12:14 crc kubenswrapper[4925]: E0121 11:12:14.867699 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7\": container with ID starting with 6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7 not found: ID does not exist" containerID="6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.867749 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7"} err="failed to get container status \"6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7\": rpc error: code = NotFound desc = could not find container \"6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7\": container with ID starting with 6f36d1e675a011763dc13279c60712d7a28802e18239d5d9c9d2b91aa7cd8ca7 not found: ID does not exist" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.867780 4925 scope.go:117] "RemoveContainer" containerID="e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb" Jan 21 11:12:14 crc kubenswrapper[4925]: E0121 11:12:14.881746 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb\": container with ID starting with e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb not found: ID does not exist" containerID="e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.881817 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb"} err="failed to get container status \"e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb\": rpc error: code = NotFound desc = could not find container \"e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb\": container with ID starting with e1f95b19594708327084570e2035757f2ba25a8691cf41dba32d4bb5374371fb not found: ID does not exist" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.881876 4925 scope.go:117] "RemoveContainer" containerID="153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9" Jan 21 11:12:14 crc kubenswrapper[4925]: E0121 11:12:14.882798 4925 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9\": container with ID starting with 153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9 not found: ID does not exist" containerID="153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9" Jan 21 11:12:14 crc kubenswrapper[4925]: I0121 11:12:14.882858 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9"} err="failed to get container status \"153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9\": rpc error: code = NotFound desc = could not find container \"153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9\": container with ID starting with 153f96a7903c52a97fda982c6b89808d31f81ef10302188ed800dbdf666eaef9 not found: ID does not exist" Jan 21 11:12:15 crc kubenswrapper[4925]: I0121 11:12:15.518423 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" path="/var/lib/kubelet/pods/d8204e9e-556b-4f34-be42-cc7ace8df9ce/volumes" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.008598 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76"] Jan 21 11:12:22 crc kubenswrapper[4925]: E0121 11:12:22.011589 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerName="registry-server" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.011718 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerName="registry-server" Jan 21 11:12:22 crc kubenswrapper[4925]: E0121 11:12:22.011796 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerName="extract-content" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.011851 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerName="extract-content" Jan 21 11:12:22 crc kubenswrapper[4925]: E0121 11:12:22.011913 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerName="extract-utilities" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.011970 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerName="extract-utilities" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.012148 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8204e9e-556b-4f34-be42-cc7ace8df9ce" containerName="registry-server" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.013452 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.015948 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.027428 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76"] Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.108501 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p74ll\" (UniqueName: \"kubernetes.io/projected/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-kube-api-access-p74ll\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.109181 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.109283 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.211531 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.211704 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.211777 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p74ll\" (UniqueName: \"kubernetes.io/projected/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-kube-api-access-p74ll\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.212497 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.212512 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.236194 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p74ll\" (UniqueName: \"kubernetes.io/projected/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-kube-api-access-p74ll\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.339861 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.661695 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-7lrsj" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" containerID="cri-o://8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445" gracePeriod=15 Jan 21 11:12:22 crc kubenswrapper[4925]: I0121 11:12:22.743835 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76"] Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.036699 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-7lrsj_59445cd0-2391-49e1-9a4e-6ca280c8ab85/console/0.log" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.039574 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.131531 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-serving-cert\") pod \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.131640 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-oauth-config\") pod \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.131678 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-668sl\" (UniqueName: \"kubernetes.io/projected/59445cd0-2391-49e1-9a4e-6ca280c8ab85-kube-api-access-668sl\") pod \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.131707 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-service-ca\") pod \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.133317 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-service-ca" (OuterVolumeSpecName: "service-ca") pod "59445cd0-2391-49e1-9a4e-6ca280c8ab85" (UID: "59445cd0-2391-49e1-9a4e-6ca280c8ab85"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.133731 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-oauth-serving-cert\") pod \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.133783 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-trusted-ca-bundle\") pod \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.133890 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-config\") pod \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\" (UID: \"59445cd0-2391-49e1-9a4e-6ca280c8ab85\") " Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.134259 4925 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.134590 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-config" (OuterVolumeSpecName: "console-config") pod "59445cd0-2391-49e1-9a4e-6ca280c8ab85" (UID: "59445cd0-2391-49e1-9a4e-6ca280c8ab85"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.134514 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "59445cd0-2391-49e1-9a4e-6ca280c8ab85" (UID: "59445cd0-2391-49e1-9a4e-6ca280c8ab85"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.134874 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "59445cd0-2391-49e1-9a4e-6ca280c8ab85" (UID: "59445cd0-2391-49e1-9a4e-6ca280c8ab85"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.140664 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59445cd0-2391-49e1-9a4e-6ca280c8ab85-kube-api-access-668sl" (OuterVolumeSpecName: "kube-api-access-668sl") pod "59445cd0-2391-49e1-9a4e-6ca280c8ab85" (UID: "59445cd0-2391-49e1-9a4e-6ca280c8ab85"). InnerVolumeSpecName "kube-api-access-668sl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.141265 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "59445cd0-2391-49e1-9a4e-6ca280c8ab85" (UID: "59445cd0-2391-49e1-9a4e-6ca280c8ab85"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.141355 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "59445cd0-2391-49e1-9a4e-6ca280c8ab85" (UID: "59445cd0-2391-49e1-9a4e-6ca280c8ab85"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.235977 4925 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.236115 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-668sl\" (UniqueName: \"kubernetes.io/projected/59445cd0-2391-49e1-9a4e-6ca280c8ab85-kube-api-access-668sl\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.236136 4925 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.236159 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.236180 4925 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.236197 4925 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/59445cd0-2391-49e1-9a4e-6ca280c8ab85-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.702070 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-7lrsj_59445cd0-2391-49e1-9a4e-6ca280c8ab85/console/0.log" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.702569 4925 generic.go:334] "Generic (PLEG): container finished" podID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerID="8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445" exitCode=2 Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.702695 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-7lrsj" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.702714 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-7lrsj" event={"ID":"59445cd0-2391-49e1-9a4e-6ca280c8ab85","Type":"ContainerDied","Data":"8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445"} Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.702757 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-7lrsj" event={"ID":"59445cd0-2391-49e1-9a4e-6ca280c8ab85","Type":"ContainerDied","Data":"e8702c105249c0af8a0934331b188edb3059ed7ecc3f38c7760e1d63c4f49bb4"} Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.702781 4925 scope.go:117] "RemoveContainer" containerID="8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.708906 4925 generic.go:334] "Generic (PLEG): container finished" podID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerID="de9bf2767b67626c654c8cf3f2ae3625a061a2db021016b92e7167b3e8c69bf0" exitCode=0 Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.709000 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" event={"ID":"5dfe3134-6d2f-47d9-b786-f69a4fbcf164","Type":"ContainerDied","Data":"de9bf2767b67626c654c8cf3f2ae3625a061a2db021016b92e7167b3e8c69bf0"} Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.709070 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" event={"ID":"5dfe3134-6d2f-47d9-b786-f69a4fbcf164","Type":"ContainerStarted","Data":"335465535c8c2bf92256fba0e49fc95850f3a0f6260abd4a06e47db66af581f7"} Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.734762 4925 scope.go:117] "RemoveContainer" containerID="8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445" Jan 21 11:12:23 crc kubenswrapper[4925]: E0121 11:12:23.735589 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445\": container with ID starting with 8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445 not found: ID does not exist" containerID="8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.735637 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445"} err="failed to get container status \"8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445\": rpc error: code = NotFound desc = could not find container \"8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445\": container with ID starting with 8f81db2babdb04de5682e533887674acc20612010f7eaadc4c0b1e620eb68445 not found: ID does not exist" Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.738661 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-7lrsj"] Jan 21 11:12:23 crc kubenswrapper[4925]: I0121 11:12:23.742583 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-7lrsj"] Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.511080 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" path="/var/lib/kubelet/pods/59445cd0-2391-49e1-9a4e-6ca280c8ab85/volumes" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.544722 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qmnvc"] Jan 21 11:12:25 crc kubenswrapper[4925]: E0121 11:12:25.545106 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.545123 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.545496 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="59445cd0-2391-49e1-9a4e-6ca280c8ab85" containerName="console" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.546737 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.615507 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qmnvc"] Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.678460 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgqz2\" (UniqueName: \"kubernetes.io/projected/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-kube-api-access-jgqz2\") pod \"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.679292 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-utilities\") pod \"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.679373 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-catalog-content\") pod \"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.877759 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgqz2\" (UniqueName: \"kubernetes.io/projected/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-kube-api-access-jgqz2\") pod \"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.877868 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-utilities\") pod \"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.877907 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-catalog-content\") pod 
\"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.878802 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-utilities\") pod \"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:25 crc kubenswrapper[4925]: I0121 11:12:25.879091 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-catalog-content\") pod \"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:26 crc kubenswrapper[4925]: I0121 11:12:26.056947 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgqz2\" (UniqueName: \"kubernetes.io/projected/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-kube-api-access-jgqz2\") pod \"certified-operators-qmnvc\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:26 crc kubenswrapper[4925]: I0121 11:12:26.170327 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:26 crc kubenswrapper[4925]: I0121 11:12:26.471967 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qmnvc"] Jan 21 11:12:26 crc kubenswrapper[4925]: I0121 11:12:26.734914 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmnvc" event={"ID":"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf","Type":"ContainerStarted","Data":"40c9527713d00a109f50f9bd32ce8c3b95f5d8fb63d1a9c4f0e3adc29839b4cc"} Jan 21 11:12:27 crc kubenswrapper[4925]: I0121 11:12:27.745633 4925 generic.go:334] "Generic (PLEG): container finished" podID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerID="3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb" exitCode=0 Jan 21 11:12:27 crc kubenswrapper[4925]: I0121 11:12:27.745750 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmnvc" event={"ID":"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf","Type":"ContainerDied","Data":"3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb"} Jan 21 11:12:29 crc kubenswrapper[4925]: I0121 11:12:29.771044 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmnvc" event={"ID":"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf","Type":"ContainerStarted","Data":"f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413"} Jan 21 11:12:29 crc kubenswrapper[4925]: I0121 11:12:29.774784 4925 generic.go:334] "Generic (PLEG): container finished" podID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerID="43b212cfef2f9895223fe8af4bdae19139affa897490f86613b7c3dec2ad9049" exitCode=0 Jan 21 11:12:29 crc kubenswrapper[4925]: I0121 11:12:29.774843 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" event={"ID":"5dfe3134-6d2f-47d9-b786-f69a4fbcf164","Type":"ContainerDied","Data":"43b212cfef2f9895223fe8af4bdae19139affa897490f86613b7c3dec2ad9049"} Jan 21 11:12:30 crc kubenswrapper[4925]: I0121 
11:12:30.789627 4925 generic.go:334] "Generic (PLEG): container finished" podID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerID="f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413" exitCode=0 Jan 21 11:12:30 crc kubenswrapper[4925]: I0121 11:12:30.789714 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmnvc" event={"ID":"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf","Type":"ContainerDied","Data":"f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413"} Jan 21 11:12:30 crc kubenswrapper[4925]: I0121 11:12:30.794991 4925 generic.go:334] "Generic (PLEG): container finished" podID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerID="41b2582d3c11ec2c62c7b14d81d1070696b6425af81f278b060feac78c4f56f9" exitCode=0 Jan 21 11:12:30 crc kubenswrapper[4925]: I0121 11:12:30.795054 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" event={"ID":"5dfe3134-6d2f-47d9-b786-f69a4fbcf164","Type":"ContainerDied","Data":"41b2582d3c11ec2c62c7b14d81d1070696b6425af81f278b060feac78c4f56f9"} Jan 21 11:12:32 crc kubenswrapper[4925]: I0121 11:12:32.948813 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmnvc" event={"ID":"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf","Type":"ContainerStarted","Data":"eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d"} Jan 21 11:12:32 crc kubenswrapper[4925]: I0121 11:12:32.977463 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qmnvc" podStartSLOduration=4.206529503 podStartE2EDuration="7.977430087s" podCreationTimestamp="2026-01-21 11:12:25 +0000 UTC" firstStartedPulling="2026-01-21 11:12:27.984081559 +0000 UTC m=+1039.587973493" lastFinishedPulling="2026-01-21 11:12:31.754982143 +0000 UTC m=+1043.358874077" observedRunningTime="2026-01-21 11:12:32.974039051 +0000 UTC m=+1044.577930985" watchObservedRunningTime="2026-01-21 11:12:32.977430087 +0000 UTC m=+1044.581322021" Jan 21 11:12:32 crc kubenswrapper[4925]: I0121 11:12:32.996683 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.129900 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-bundle\") pod \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.130050 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p74ll\" (UniqueName: \"kubernetes.io/projected/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-kube-api-access-p74ll\") pod \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.130134 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-util\") pod \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\" (UID: \"5dfe3134-6d2f-47d9-b786-f69a4fbcf164\") " Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.131651 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-bundle" (OuterVolumeSpecName: "bundle") pod "5dfe3134-6d2f-47d9-b786-f69a4fbcf164" (UID: "5dfe3134-6d2f-47d9-b786-f69a4fbcf164"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.139460 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-kube-api-access-p74ll" (OuterVolumeSpecName: "kube-api-access-p74ll") pod "5dfe3134-6d2f-47d9-b786-f69a4fbcf164" (UID: "5dfe3134-6d2f-47d9-b786-f69a4fbcf164"). InnerVolumeSpecName "kube-api-access-p74ll". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.142557 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-util" (OuterVolumeSpecName: "util") pod "5dfe3134-6d2f-47d9-b786-f69a4fbcf164" (UID: "5dfe3134-6d2f-47d9-b786-f69a4fbcf164"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.232550 4925 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.232642 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p74ll\" (UniqueName: \"kubernetes.io/projected/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-kube-api-access-p74ll\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:33 crc kubenswrapper[4925]: I0121 11:12:33.232671 4925 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5dfe3134-6d2f-47d9-b786-f69a4fbcf164-util\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:34 crc kubenswrapper[4925]: I0121 11:12:34.031607 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" Jan 21 11:12:34 crc kubenswrapper[4925]: I0121 11:12:34.031597 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76" event={"ID":"5dfe3134-6d2f-47d9-b786-f69a4fbcf164","Type":"ContainerDied","Data":"335465535c8c2bf92256fba0e49fc95850f3a0f6260abd4a06e47db66af581f7"} Jan 21 11:12:34 crc kubenswrapper[4925]: I0121 11:12:34.031730 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="335465535c8c2bf92256fba0e49fc95850f3a0f6260abd4a06e47db66af581f7" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.171595 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.172156 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.225901 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.351974 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l2vtw"] Jan 21 11:12:36 crc kubenswrapper[4925]: E0121 11:12:36.352341 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerName="pull" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.352359 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerName="pull" Jan 21 11:12:36 crc kubenswrapper[4925]: E0121 11:12:36.352383 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerName="extract" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.352412 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerName="extract" Jan 21 11:12:36 crc kubenswrapper[4925]: E0121 11:12:36.352427 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerName="util" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.352434 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerName="util" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.352591 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dfe3134-6d2f-47d9-b786-f69a4fbcf164" containerName="extract" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.353708 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.371942 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l2vtw"] Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.457023 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csczh\" (UniqueName: \"kubernetes.io/projected/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-kube-api-access-csczh\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.457097 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-catalog-content\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.457157 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-utilities\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.558708 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csczh\" (UniqueName: \"kubernetes.io/projected/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-kube-api-access-csczh\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.558780 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-catalog-content\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.558840 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-utilities\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.559546 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-utilities\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.559666 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-catalog-content\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.583640 4925 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-csczh\" (UniqueName: \"kubernetes.io/projected/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-kube-api-access-csczh\") pod \"redhat-marketplace-l2vtw\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:36 crc kubenswrapper[4925]: I0121 11:12:36.674670 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:37 crc kubenswrapper[4925]: I0121 11:12:37.183757 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:37 crc kubenswrapper[4925]: I0121 11:12:37.471890 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l2vtw"] Jan 21 11:12:38 crc kubenswrapper[4925]: I0121 11:12:38.125762 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l2vtw" event={"ID":"9cdd5347-6394-4951-a68c-b4dcfd80e1c4","Type":"ContainerStarted","Data":"c1ed4df42046343fa5085b1fc4430ba884e812a3297b8e3d55bd459badc6b980"} Jan 21 11:12:38 crc kubenswrapper[4925]: I0121 11:12:38.746986 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qmnvc"] Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.188283 4925 generic.go:334] "Generic (PLEG): container finished" podID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerID="12768bc9900e554319a828065fb2c4a66807816b96d1b70242b04e8255db91a8" exitCode=0 Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.188773 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qmnvc" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerName="registry-server" containerID="cri-o://eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d" gracePeriod=2 Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.190228 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l2vtw" event={"ID":"9cdd5347-6394-4951-a68c-b4dcfd80e1c4","Type":"ContainerDied","Data":"12768bc9900e554319a828065fb2c4a66807816b96d1b70242b04e8255db91a8"} Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.681587 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.884430 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-catalog-content\") pod \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.884598 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-utilities\") pod \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.884699 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgqz2\" (UniqueName: \"kubernetes.io/projected/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-kube-api-access-jgqz2\") pod \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\" (UID: \"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf\") " Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.885801 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-utilities" (OuterVolumeSpecName: "utilities") pod "c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" (UID: "c2d5f74e-aa23-46be-8f73-69e57e5ebbdf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.891950 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-kube-api-access-jgqz2" (OuterVolumeSpecName: "kube-api-access-jgqz2") pod "c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" (UID: "c2d5f74e-aa23-46be-8f73-69e57e5ebbdf"). InnerVolumeSpecName "kube-api-access-jgqz2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.938928 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" (UID: "c2d5f74e-aa23-46be-8f73-69e57e5ebbdf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.987033 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.987083 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgqz2\" (UniqueName: \"kubernetes.io/projected/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-kube-api-access-jgqz2\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:39 crc kubenswrapper[4925]: I0121 11:12:39.987096 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.201955 4925 generic.go:334] "Generic (PLEG): container finished" podID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerID="eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d" exitCode=0 Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.202039 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmnvc" event={"ID":"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf","Type":"ContainerDied","Data":"eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d"} Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.202136 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmnvc" event={"ID":"c2d5f74e-aa23-46be-8f73-69e57e5ebbdf","Type":"ContainerDied","Data":"40c9527713d00a109f50f9bd32ce8c3b95f5d8fb63d1a9c4f0e3adc29839b4cc"} Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.202162 4925 scope.go:117] "RemoveContainer" containerID="eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.202153 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qmnvc" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.231814 4925 scope.go:117] "RemoveContainer" containerID="f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.254769 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qmnvc"] Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.257888 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qmnvc"] Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.270843 4925 scope.go:117] "RemoveContainer" containerID="3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.298966 4925 scope.go:117] "RemoveContainer" containerID="eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d" Jan 21 11:12:40 crc kubenswrapper[4925]: E0121 11:12:40.299879 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d\": container with ID starting with eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d not found: ID does not exist" containerID="eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.300105 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d"} err="failed to get container status \"eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d\": rpc error: code = NotFound desc = could not find container \"eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d\": container with ID starting with eda4ff5afd534a633b8eb9f6f869581affed84cd905c72e9949b1ef70cec851d not found: ID does not exist" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.300158 4925 scope.go:117] "RemoveContainer" containerID="f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413" Jan 21 11:12:40 crc kubenswrapper[4925]: E0121 11:12:40.300783 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413\": container with ID starting with f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413 not found: ID does not exist" containerID="f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.300816 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413"} err="failed to get container status \"f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413\": rpc error: code = NotFound desc = could not find container \"f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413\": container with ID starting with f5048ec0047a271d1738f2a6ce579637714359f7e98128be7ee32546e0deb413 not found: ID does not exist" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.300833 4925 scope.go:117] "RemoveContainer" containerID="3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb" Jan 21 11:12:40 crc kubenswrapper[4925]: E0121 11:12:40.301201 4925 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb\": container with ID starting with 3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb not found: ID does not exist" containerID="3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb" Jan 21 11:12:40 crc kubenswrapper[4925]: I0121 11:12:40.301257 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb"} err="failed to get container status \"3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb\": rpc error: code = NotFound desc = could not find container \"3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb\": container with ID starting with 3751472cf92fa8642bcc59257b57027e8692feaff5fa9092a27c086fd2e372bb not found: ID does not exist" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.215955 4925 generic.go:334] "Generic (PLEG): container finished" podID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerID="c91453122a4b5f85651ff5db3ea89a67c0a04cdbb27bbf16765c95c158126a8c" exitCode=0 Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.216044 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l2vtw" event={"ID":"9cdd5347-6394-4951-a68c-b4dcfd80e1c4","Type":"ContainerDied","Data":"c91453122a4b5f85651ff5db3ea89a67c0a04cdbb27bbf16765c95c158126a8c"} Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.522532 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" path="/var/lib/kubelet/pods/c2d5f74e-aa23-46be-8f73-69e57e5ebbdf/volumes" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.953367 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk"] Jan 21 11:12:41 crc kubenswrapper[4925]: E0121 11:12:41.954219 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerName="registry-server" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.954245 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerName="registry-server" Jan 21 11:12:41 crc kubenswrapper[4925]: E0121 11:12:41.954264 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerName="extract-utilities" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.954272 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerName="extract-utilities" Jan 21 11:12:41 crc kubenswrapper[4925]: E0121 11:12:41.954296 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerName="extract-content" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.954303 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerName="extract-content" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.956421 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2d5f74e-aa23-46be-8f73-69e57e5ebbdf" containerName="registry-server" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.957283 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.964308 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.964380 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.964767 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.965308 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-xcgvj" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.965520 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 21 11:12:41 crc kubenswrapper[4925]: I0121 11:12:41.970144 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk"] Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.030593 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czr7m\" (UniqueName: \"kubernetes.io/projected/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-kube-api-access-czr7m\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.030709 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-webhook-cert\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.030784 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-apiservice-cert\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.132197 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-apiservice-cert\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.132290 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czr7m\" (UniqueName: \"kubernetes.io/projected/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-kube-api-access-czr7m\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.132348 
4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-webhook-cert\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.144847 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-apiservice-cert\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.160114 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-webhook-cert\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.160200 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czr7m\" (UniqueName: \"kubernetes.io/projected/1f8eea58-9366-4bb1-a9d2-dc8842674dc2-kube-api-access-czr7m\") pod \"metallb-operator-controller-manager-57547767ff-zrxjk\" (UID: \"1f8eea58-9366-4bb1-a9d2-dc8842674dc2\") " pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.231289 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l2vtw" event={"ID":"9cdd5347-6394-4951-a68c-b4dcfd80e1c4","Type":"ContainerStarted","Data":"555dbe82588b04fe5bb45ec956fb4d0d74a5b647669fcde80ad0b69ea7ed9940"} Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.258329 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l2vtw" podStartSLOduration=3.775955321 podStartE2EDuration="6.258306346s" podCreationTimestamp="2026-01-21 11:12:36 +0000 UTC" firstStartedPulling="2026-01-21 11:12:39.192631309 +0000 UTC m=+1050.796523253" lastFinishedPulling="2026-01-21 11:12:41.674982344 +0000 UTC m=+1053.278874278" observedRunningTime="2026-01-21 11:12:42.256580062 +0000 UTC m=+1053.860472016" watchObservedRunningTime="2026-01-21 11:12:42.258306346 +0000 UTC m=+1053.862198280" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.276726 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.329657 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895"] Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.333411 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.336154 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/955477b1-b9f0-41a2-aa5b-2e2f47495422-webhook-cert\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.336550 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/955477b1-b9f0-41a2-aa5b-2e2f47495422-apiservice-cert\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.336666 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s689j\" (UniqueName: \"kubernetes.io/projected/955477b1-b9f0-41a2-aa5b-2e2f47495422-kube-api-access-s689j\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.339069 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-z5nxs" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.339425 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.339650 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.364478 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895"] Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.437263 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/955477b1-b9f0-41a2-aa5b-2e2f47495422-webhook-cert\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.437373 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/955477b1-b9f0-41a2-aa5b-2e2f47495422-apiservice-cert\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.437440 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s689j\" (UniqueName: \"kubernetes.io/projected/955477b1-b9f0-41a2-aa5b-2e2f47495422-kube-api-access-s689j\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 
11:12:42.442689 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/955477b1-b9f0-41a2-aa5b-2e2f47495422-webhook-cert\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.453776 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/955477b1-b9f0-41a2-aa5b-2e2f47495422-apiservice-cert\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.465874 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s689j\" (UniqueName: \"kubernetes.io/projected/955477b1-b9f0-41a2-aa5b-2e2f47495422-kube-api-access-s689j\") pod \"metallb-operator-webhook-server-5b9dd8b59d-59895\" (UID: \"955477b1-b9f0-41a2-aa5b-2e2f47495422\") " pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.672338 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:42 crc kubenswrapper[4925]: I0121 11:12:42.924000 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk"] Jan 21 11:12:42 crc kubenswrapper[4925]: W0121 11:12:42.951549 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f8eea58_9366_4bb1_a9d2_dc8842674dc2.slice/crio-76af9cf575e2738f0879dac6de21fdc5de283e62bc407efa1ddb06839760de88 WatchSource:0}: Error finding container 76af9cf575e2738f0879dac6de21fdc5de283e62bc407efa1ddb06839760de88: Status 404 returned error can't find the container with id 76af9cf575e2738f0879dac6de21fdc5de283e62bc407efa1ddb06839760de88 Jan 21 11:12:43 crc kubenswrapper[4925]: I0121 11:12:43.164201 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895"] Jan 21 11:12:43 crc kubenswrapper[4925]: W0121 11:12:43.174033 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod955477b1_b9f0_41a2_aa5b_2e2f47495422.slice/crio-723db4e70a4d2b0a11d1e84a36247062d579e180c25d23b3f76eddd573fd3eb9 WatchSource:0}: Error finding container 723db4e70a4d2b0a11d1e84a36247062d579e180c25d23b3f76eddd573fd3eb9: Status 404 returned error can't find the container with id 723db4e70a4d2b0a11d1e84a36247062d579e180c25d23b3f76eddd573fd3eb9 Jan 21 11:12:43 crc kubenswrapper[4925]: I0121 11:12:43.240510 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" event={"ID":"955477b1-b9f0-41a2-aa5b-2e2f47495422","Type":"ContainerStarted","Data":"723db4e70a4d2b0a11d1e84a36247062d579e180c25d23b3f76eddd573fd3eb9"} Jan 21 11:12:43 crc kubenswrapper[4925]: I0121 11:12:43.242535 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" 
event={"ID":"1f8eea58-9366-4bb1-a9d2-dc8842674dc2","Type":"ContainerStarted","Data":"76af9cf575e2738f0879dac6de21fdc5de283e62bc407efa1ddb06839760de88"} Jan 21 11:12:46 crc kubenswrapper[4925]: I0121 11:12:46.675444 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:46 crc kubenswrapper[4925]: I0121 11:12:46.675929 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:46 crc kubenswrapper[4925]: I0121 11:12:46.752680 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:47 crc kubenswrapper[4925]: I0121 11:12:47.511246 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:49 crc kubenswrapper[4925]: I0121 11:12:49.142221 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l2vtw"] Jan 21 11:12:49 crc kubenswrapper[4925]: I0121 11:12:49.355223 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l2vtw" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerName="registry-server" containerID="cri-o://555dbe82588b04fe5bb45ec956fb4d0d74a5b647669fcde80ad0b69ea7ed9940" gracePeriod=2 Jan 21 11:12:49 crc kubenswrapper[4925]: I0121 11:12:49.941056 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:12:49 crc kubenswrapper[4925]: I0121 11:12:49.941178 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:12:50 crc kubenswrapper[4925]: I0121 11:12:50.366580 4925 generic.go:334] "Generic (PLEG): container finished" podID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerID="555dbe82588b04fe5bb45ec956fb4d0d74a5b647669fcde80ad0b69ea7ed9940" exitCode=0 Jan 21 11:12:50 crc kubenswrapper[4925]: I0121 11:12:50.366668 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l2vtw" event={"ID":"9cdd5347-6394-4951-a68c-b4dcfd80e1c4","Type":"ContainerDied","Data":"555dbe82588b04fe5bb45ec956fb4d0d74a5b647669fcde80ad0b69ea7ed9940"} Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.336269 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.402078 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l2vtw" event={"ID":"9cdd5347-6394-4951-a68c-b4dcfd80e1c4","Type":"ContainerDied","Data":"c1ed4df42046343fa5085b1fc4430ba884e812a3297b8e3d55bd459badc6b980"} Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.402161 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l2vtw" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.402268 4925 scope.go:117] "RemoveContainer" containerID="555dbe82588b04fe5bb45ec956fb4d0d74a5b647669fcde80ad0b69ea7ed9940" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.415847 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" event={"ID":"1f8eea58-9366-4bb1-a9d2-dc8842674dc2","Type":"ContainerStarted","Data":"5d0b1ee78610bcfcdf2714de219587a4a3b1062ac9941afba2409c716553cd90"} Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.417022 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.420529 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" event={"ID":"955477b1-b9f0-41a2-aa5b-2e2f47495422","Type":"ContainerStarted","Data":"066db10c66ab155d9c63d7eb74d442c455b6db728d8fc1f6a74c1bd9472adf2c"} Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.421386 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.432789 4925 scope.go:117] "RemoveContainer" containerID="c91453122a4b5f85651ff5db3ea89a67c0a04cdbb27bbf16765c95c158126a8c" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.453299 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" podStartSLOduration=2.373076732 podStartE2EDuration="12.453267937s" podCreationTimestamp="2026-01-21 11:12:41 +0000 UTC" firstStartedPulling="2026-01-21 11:12:42.956708235 +0000 UTC m=+1054.560600179" lastFinishedPulling="2026-01-21 11:12:53.03689945 +0000 UTC m=+1064.640791384" observedRunningTime="2026-01-21 11:12:53.444462068 +0000 UTC m=+1065.048354022" watchObservedRunningTime="2026-01-21 11:12:53.453267937 +0000 UTC m=+1065.057159881" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.467728 4925 scope.go:117] "RemoveContainer" containerID="12768bc9900e554319a828065fb2c4a66807816b96d1b70242b04e8255db91a8" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.476055 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" podStartSLOduration=1.589950239 podStartE2EDuration="11.476027577s" podCreationTimestamp="2026-01-21 11:12:42 +0000 UTC" firstStartedPulling="2026-01-21 11:12:43.180192783 +0000 UTC m=+1054.784084717" lastFinishedPulling="2026-01-21 11:12:53.066270131 +0000 UTC m=+1064.670162055" observedRunningTime="2026-01-21 11:12:53.474193329 +0000 UTC m=+1065.078085273" watchObservedRunningTime="2026-01-21 11:12:53.476027577 +0000 UTC m=+1065.079919511" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.493230 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-utilities\") pod \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.493307 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-catalog-content\") pod \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.493385 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csczh\" (UniqueName: \"kubernetes.io/projected/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-kube-api-access-csczh\") pod \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\" (UID: \"9cdd5347-6394-4951-a68c-b4dcfd80e1c4\") " Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.495072 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-utilities" (OuterVolumeSpecName: "utilities") pod "9cdd5347-6394-4951-a68c-b4dcfd80e1c4" (UID: "9cdd5347-6394-4951-a68c-b4dcfd80e1c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.504245 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-kube-api-access-csczh" (OuterVolumeSpecName: "kube-api-access-csczh") pod "9cdd5347-6394-4951-a68c-b4dcfd80e1c4" (UID: "9cdd5347-6394-4951-a68c-b4dcfd80e1c4"). InnerVolumeSpecName "kube-api-access-csczh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.522183 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9cdd5347-6394-4951-a68c-b4dcfd80e1c4" (UID: "9cdd5347-6394-4951-a68c-b4dcfd80e1c4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.602184 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.602685 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.602803 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csczh\" (UniqueName: \"kubernetes.io/projected/9cdd5347-6394-4951-a68c-b4dcfd80e1c4-kube-api-access-csczh\") on node \"crc\" DevicePath \"\"" Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.750292 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l2vtw"] Jan 21 11:12:53 crc kubenswrapper[4925]: I0121 11:12:53.756687 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l2vtw"] Jan 21 11:12:55 crc kubenswrapper[4925]: I0121 11:12:55.514303 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" path="/var/lib/kubelet/pods/9cdd5347-6394-4951-a68c-b4dcfd80e1c4/volumes" Jan 21 11:13:12 crc kubenswrapper[4925]: I0121 11:13:12.680609 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-5b9dd8b59d-59895" Jan 21 11:13:19 crc kubenswrapper[4925]: I0121 11:13:19.940914 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:13:19 crc kubenswrapper[4925]: I0121 11:13:19.941558 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:13:32 crc kubenswrapper[4925]: I0121 11:13:32.281871 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-57547767ff-zrxjk" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.222338 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-7tz4m"] Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.223565 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerName="extract-content" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.223599 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerName="extract-content" Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.223705 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerName="extract-utilities" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.223721 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" 
containerName="extract-utilities" Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.223740 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerName="registry-server" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.223749 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerName="registry-server" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.224170 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cdd5347-6394-4951-a68c-b4dcfd80e1c4" containerName="registry-server" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.248062 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c"] Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.254522 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.254537 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263239 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl96t\" (UniqueName: \"kubernetes.io/projected/9449246f-d4a0-407f-8e9f-cb7271c90d72-kube-api-access-vl96t\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263328 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-reloader\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263366 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-metrics\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263446 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-startup\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263533 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0b7695ad-2b58-4be9-911d-bc83bece0db7-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9d94c\" (UID: \"0b7695ad-2b58-4be9-911d-bc83bece0db7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263620 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9p2x\" (UniqueName: \"kubernetes.io/projected/0b7695ad-2b58-4be9-911d-bc83bece0db7-kube-api-access-j9p2x\") pod \"frr-k8s-webhook-server-7df86c4f6c-9d94c\" (UID: \"0b7695ad-2b58-4be9-911d-bc83bece0db7\") " 
pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263655 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-conf\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263683 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-sockets\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.263708 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9449246f-d4a0-407f-8e9f-cb7271c90d72-metrics-certs\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.266779 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.266836 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-85prl" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.266942 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.267119 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.274170 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c"] Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.353038 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-zxq6z"] Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.354549 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.359735 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.360442 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.360565 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.362927 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-6j7jk" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.365784 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-conf\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.365834 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-sockets\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366491 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9449246f-d4a0-407f-8e9f-cb7271c90d72-metrics-certs\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366413 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-sockets\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366552 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-conf\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366667 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjdj7\" (UniqueName: \"kubernetes.io/projected/0ac5019d-ffb4-4cb6-9042-1b983b15841a-kube-api-access-pjdj7\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366714 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metallb-excludel2\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366786 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl96t\" (UniqueName: 
\"kubernetes.io/projected/9449246f-d4a0-407f-8e9f-cb7271c90d72-kube-api-access-vl96t\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366850 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-reloader\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366879 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-metrics\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366929 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-startup\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.366974 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.367040 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0b7695ad-2b58-4be9-911d-bc83bece0db7-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9d94c\" (UID: \"0b7695ad-2b58-4be9-911d-bc83bece0db7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.367111 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metrics-certs\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.367157 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9p2x\" (UniqueName: \"kubernetes.io/projected/0b7695ad-2b58-4be9-911d-bc83bece0db7-kube-api-access-j9p2x\") pod \"frr-k8s-webhook-server-7df86c4f6c-9d94c\" (UID: \"0b7695ad-2b58-4be9-911d-bc83bece0db7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.368066 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-reloader\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.368454 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9449246f-d4a0-407f-8e9f-cb7271c90d72-metrics\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: 
I0121 11:13:33.369508 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9449246f-d4a0-407f-8e9f-cb7271c90d72-frr-startup\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.371943 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-q24bt"] Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.373406 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.377860 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0b7695ad-2b58-4be9-911d-bc83bece0db7-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9d94c\" (UID: \"0b7695ad-2b58-4be9-911d-bc83bece0db7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.379176 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9449246f-d4a0-407f-8e9f-cb7271c90d72-metrics-certs\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.401321 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-q24bt"] Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.407487 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.412057 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9p2x\" (UniqueName: \"kubernetes.io/projected/0b7695ad-2b58-4be9-911d-bc83bece0db7-kube-api-access-j9p2x\") pod \"frr-k8s-webhook-server-7df86c4f6c-9d94c\" (UID: \"0b7695ad-2b58-4be9-911d-bc83bece0db7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.423148 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl96t\" (UniqueName: \"kubernetes.io/projected/9449246f-d4a0-407f-8e9f-cb7271c90d72-kube-api-access-vl96t\") pod \"frr-k8s-7tz4m\" (UID: \"9449246f-d4a0-407f-8e9f-cb7271c90d72\") " pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.467805 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlvsm\" (UniqueName: \"kubernetes.io/projected/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-kube-api-access-jlvsm\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.467877 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjdj7\" (UniqueName: \"kubernetes.io/projected/0ac5019d-ffb4-4cb6-9042-1b983b15841a-kube-api-access-pjdj7\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.467923 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: 
\"kubernetes.io/configmap/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metallb-excludel2\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.467961 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-metrics-certs\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.468205 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-cert\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.468346 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.468541 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metrics-certs\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.468539 4925 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.468605 4925 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.468820 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist podName:0ac5019d-ffb4-4cb6-9042-1b983b15841a nodeName:}" failed. No retries permitted until 2026-01-21 11:13:33.968701095 +0000 UTC m=+1105.572593029 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist") pod "speaker-zxq6z" (UID: "0ac5019d-ffb4-4cb6-9042-1b983b15841a") : secret "metallb-memberlist" not found Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.468857 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metrics-certs podName:0ac5019d-ffb4-4cb6-9042-1b983b15841a nodeName:}" failed. No retries permitted until 2026-01-21 11:13:33.968837149 +0000 UTC m=+1105.572729083 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metrics-certs") pod "speaker-zxq6z" (UID: "0ac5019d-ffb4-4cb6-9042-1b983b15841a") : secret "speaker-certs-secret" not found Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.469233 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metallb-excludel2\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.494083 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjdj7\" (UniqueName: \"kubernetes.io/projected/0ac5019d-ffb4-4cb6-9042-1b983b15841a-kube-api-access-pjdj7\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.569577 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-metrics-certs\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.569668 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-cert\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.569793 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlvsm\" (UniqueName: \"kubernetes.io/projected/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-kube-api-access-jlvsm\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.569826 4925 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.569942 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-metrics-certs podName:66dc8772-25c5-4ad1-b0fa-6981e3158ad5 nodeName:}" failed. No retries permitted until 2026-01-21 11:13:34.069918635 +0000 UTC m=+1105.673810569 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-metrics-certs") pod "controller-6968d8fdc4-q24bt" (UID: "66dc8772-25c5-4ad1-b0fa-6981e3158ad5") : secret "controller-certs-secret" not found Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.572027 4925 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.584578 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-cert\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.592135 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlvsm\" (UniqueName: \"kubernetes.io/projected/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-kube-api-access-jlvsm\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.601770 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.610381 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.904216 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c"] Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.977091 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.977183 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metrics-certs\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.977876 4925 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 21 11:13:33 crc kubenswrapper[4925]: E0121 11:13:33.977972 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist podName:0ac5019d-ffb4-4cb6-9042-1b983b15841a nodeName:}" failed. No retries permitted until 2026-01-21 11:13:34.977948187 +0000 UTC m=+1106.581840121 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist") pod "speaker-zxq6z" (UID: "0ac5019d-ffb4-4cb6-9042-1b983b15841a") : secret "metallb-memberlist" not found Jan 21 11:13:33 crc kubenswrapper[4925]: I0121 11:13:33.982782 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-metrics-certs\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:34 crc kubenswrapper[4925]: I0121 11:13:34.078740 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-metrics-certs\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:34 crc kubenswrapper[4925]: I0121 11:13:34.083954 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/66dc8772-25c5-4ad1-b0fa-6981e3158ad5-metrics-certs\") pod \"controller-6968d8fdc4-q24bt\" (UID: \"66dc8772-25c5-4ad1-b0fa-6981e3158ad5\") " pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:34 crc kubenswrapper[4925]: I0121 11:13:34.381072 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:34 crc kubenswrapper[4925]: I0121 11:13:34.610454 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-q24bt"] Jan 21 11:13:34 crc kubenswrapper[4925]: I0121 11:13:34.773254 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerStarted","Data":"0715d24d3722a214d53aa5c1c96e4d34401db3d24427566917ac647850429e06"} Jan 21 11:13:34 crc kubenswrapper[4925]: I0121 11:13:34.774621 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" event={"ID":"0b7695ad-2b58-4be9-911d-bc83bece0db7","Type":"ContainerStarted","Data":"1d806d7099d9be9ceea69ff1899624123598da8705d5e7e7fd8feb00cc585ca0"} Jan 21 11:13:34 crc kubenswrapper[4925]: I0121 11:13:34.775677 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-q24bt" event={"ID":"66dc8772-25c5-4ad1-b0fa-6981e3158ad5","Type":"ContainerStarted","Data":"766513812c80044f3706cbec59f2157783476468aef6819982d1ff3c78882855"} Jan 21 11:13:34 crc kubenswrapper[4925]: I0121 11:13:34.998575 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:34 crc kubenswrapper[4925]: E0121 11:13:34.998768 4925 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 21 11:13:34 crc kubenswrapper[4925]: E0121 11:13:34.998862 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist podName:0ac5019d-ffb4-4cb6-9042-1b983b15841a nodeName:}" failed. 
No retries permitted until 2026-01-21 11:13:36.998837277 +0000 UTC m=+1108.602729221 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist") pod "speaker-zxq6z" (UID: "0ac5019d-ffb4-4cb6-9042-1b983b15841a") : secret "metallb-memberlist" not found Jan 21 11:13:35 crc kubenswrapper[4925]: I0121 11:13:35.786103 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-q24bt" event={"ID":"66dc8772-25c5-4ad1-b0fa-6981e3158ad5","Type":"ContainerStarted","Data":"19d589db3509f87aa458e4c039ce59b4a4e14cf6ae64f2d990c7a1a688ed9139"} Jan 21 11:13:35 crc kubenswrapper[4925]: I0121 11:13:35.786683 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-q24bt" event={"ID":"66dc8772-25c5-4ad1-b0fa-6981e3158ad5","Type":"ContainerStarted","Data":"f11bae5b82cfdf4c8b41e2a6a716fd04618f28e40285b3bf95fc5445c2159373"} Jan 21 11:13:35 crc kubenswrapper[4925]: I0121 11:13:35.787955 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:35 crc kubenswrapper[4925]: I0121 11:13:35.820537 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-q24bt" podStartSLOduration=2.820508457 podStartE2EDuration="2.820508457s" podCreationTimestamp="2026-01-21 11:13:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:13:35.81618864 +0000 UTC m=+1107.420080574" watchObservedRunningTime="2026-01-21 11:13:35.820508457 +0000 UTC m=+1107.424400391" Jan 21 11:13:37 crc kubenswrapper[4925]: I0121 11:13:37.056445 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:37 crc kubenswrapper[4925]: I0121 11:13:37.089253 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0ac5019d-ffb4-4cb6-9042-1b983b15841a-memberlist\") pod \"speaker-zxq6z\" (UID: \"0ac5019d-ffb4-4cb6-9042-1b983b15841a\") " pod="metallb-system/speaker-zxq6z" Jan 21 11:13:37 crc kubenswrapper[4925]: I0121 11:13:37.441371 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-zxq6z" Jan 21 11:13:37 crc kubenswrapper[4925]: I0121 11:13:37.819146 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zxq6z" event={"ID":"0ac5019d-ffb4-4cb6-9042-1b983b15841a","Type":"ContainerStarted","Data":"60336de81afb1cb4fe0245ca2d79651ab2cc8bb370bee671664868ab3488583e"} Jan 21 11:13:38 crc kubenswrapper[4925]: I0121 11:13:38.841133 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zxq6z" event={"ID":"0ac5019d-ffb4-4cb6-9042-1b983b15841a","Type":"ContainerStarted","Data":"49d2fbd5955eec47a8c2f6e8ec35a40cfc359bd4ca4684f4b1b83d0f85a60cf3"} Jan 21 11:13:38 crc kubenswrapper[4925]: I0121 11:13:38.841739 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zxq6z" event={"ID":"0ac5019d-ffb4-4cb6-9042-1b983b15841a","Type":"ContainerStarted","Data":"c9de200d2feae670e2c6eb44cbc677bf6d2b46993dc9ef3777caf1cb0cd6bc3e"} Jan 21 11:13:38 crc kubenswrapper[4925]: I0121 11:13:38.844074 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-zxq6z" Jan 21 11:13:39 crc kubenswrapper[4925]: I0121 11:13:39.531168 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-zxq6z" podStartSLOduration=6.531146287 podStartE2EDuration="6.531146287s" podCreationTimestamp="2026-01-21 11:13:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:13:38.866839567 +0000 UTC m=+1110.470731521" watchObservedRunningTime="2026-01-21 11:13:39.531146287 +0000 UTC m=+1111.135038221" Jan 21 11:13:44 crc kubenswrapper[4925]: I0121 11:13:44.387531 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-q24bt" Jan 21 11:13:47 crc kubenswrapper[4925]: I0121 11:13:47.449318 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-zxq6z" Jan 21 11:13:48 crc kubenswrapper[4925]: I0121 11:13:48.085597 4925 generic.go:334] "Generic (PLEG): container finished" podID="9449246f-d4a0-407f-8e9f-cb7271c90d72" containerID="e649f85d0bf3d726c9a2499f1b73d4da87d897fd4aba00c4885343908a214412" exitCode=0 Jan 21 11:13:48 crc kubenswrapper[4925]: I0121 11:13:48.085794 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerDied","Data":"e649f85d0bf3d726c9a2499f1b73d4da87d897fd4aba00c4885343908a214412"} Jan 21 11:13:48 crc kubenswrapper[4925]: I0121 11:13:48.094443 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" event={"ID":"0b7695ad-2b58-4be9-911d-bc83bece0db7","Type":"ContainerStarted","Data":"ae01aede2a5e7716b4b0763456bd21211689b6e6e76fe27d201261dd3ecb0c9f"} Jan 21 11:13:48 crc kubenswrapper[4925]: I0121 11:13:48.095508 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:13:48 crc kubenswrapper[4925]: I0121 11:13:48.181096 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" podStartSLOduration=1.654764004 podStartE2EDuration="15.181072554s" podCreationTimestamp="2026-01-21 11:13:33 +0000 UTC" firstStartedPulling="2026-01-21 11:13:33.917917523 +0000 UTC m=+1105.521809457" 
lastFinishedPulling="2026-01-21 11:13:47.444226073 +0000 UTC m=+1119.048118007" observedRunningTime="2026-01-21 11:13:48.180816016 +0000 UTC m=+1119.784707950" watchObservedRunningTime="2026-01-21 11:13:48.181072554 +0000 UTC m=+1119.784964488" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.086516 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm"] Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.088221 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.088957 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmlkn\" (UniqueName: \"kubernetes.io/projected/b4b9e49f-2140-42de-b29f-6241bafc109e-kube-api-access-wmlkn\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.089051 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.089271 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.090592 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.102247 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm"] Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.105242 4925 generic.go:334] "Generic (PLEG): container finished" podID="9449246f-d4a0-407f-8e9f-cb7271c90d72" containerID="b6de39379e7d73efd1a1cf9e69d2b7934b4c7e79f69753d0bc62193d9403c282" exitCode=0 Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.105356 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerDied","Data":"b6de39379e7d73efd1a1cf9e69d2b7934b4c7e79f69753d0bc62193d9403c282"} Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.191576 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc 
kubenswrapper[4925]: I0121 11:13:49.192073 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmlkn\" (UniqueName: \"kubernetes.io/projected/b4b9e49f-2140-42de-b29f-6241bafc109e-kube-api-access-wmlkn\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.192324 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.192454 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.193280 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.217504 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmlkn\" (UniqueName: \"kubernetes.io/projected/b4b9e49f-2140-42de-b29f-6241bafc109e-kube-api-access-wmlkn\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.410203 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.882176 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm"] Jan 21 11:13:49 crc kubenswrapper[4925]: W0121 11:13:49.890644 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4b9e49f_2140_42de_b29f_6241bafc109e.slice/crio-29da556f19fd4556624edc873f6aade15d539972eada65cfdf948233f064c4d3 WatchSource:0}: Error finding container 29da556f19fd4556624edc873f6aade15d539972eada65cfdf948233f064c4d3: Status 404 returned error can't find the container with id 29da556f19fd4556624edc873f6aade15d539972eada65cfdf948233f064c4d3 Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.941313 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.941416 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.941476 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.942143 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e772253e4c2e0ac8edf4468d742ee24fdcac170b16df83d5dd4bb209eb0b7a25"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:13:49 crc kubenswrapper[4925]: I0121 11:13:49.942210 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://e772253e4c2e0ac8edf4468d742ee24fdcac170b16df83d5dd4bb209eb0b7a25" gracePeriod=600 Jan 21 11:13:50 crc kubenswrapper[4925]: I0121 11:13:50.112777 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" event={"ID":"b4b9e49f-2140-42de-b29f-6241bafc109e","Type":"ContainerStarted","Data":"240f71cc57774b45159f54dd4adcdb251b3bcb90380860a05737185f21bb91c0"} Jan 21 11:13:50 crc kubenswrapper[4925]: I0121 11:13:50.113304 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" event={"ID":"b4b9e49f-2140-42de-b29f-6241bafc109e","Type":"ContainerStarted","Data":"29da556f19fd4556624edc873f6aade15d539972eada65cfdf948233f064c4d3"} Jan 21 11:13:50 crc kubenswrapper[4925]: I0121 11:13:50.118306 4925 generic.go:334] "Generic (PLEG): container finished" podID="9449246f-d4a0-407f-8e9f-cb7271c90d72" 
containerID="ac05e3904136374beacce0e6b644185b46eee55db3307fb2d49becda6a7e064a" exitCode=0 Jan 21 11:13:50 crc kubenswrapper[4925]: I0121 11:13:50.118473 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerDied","Data":"ac05e3904136374beacce0e6b644185b46eee55db3307fb2d49becda6a7e064a"} Jan 21 11:13:50 crc kubenswrapper[4925]: I0121 11:13:50.131415 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="e772253e4c2e0ac8edf4468d742ee24fdcac170b16df83d5dd4bb209eb0b7a25" exitCode=0 Jan 21 11:13:50 crc kubenswrapper[4925]: I0121 11:13:50.132098 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"e772253e4c2e0ac8edf4468d742ee24fdcac170b16df83d5dd4bb209eb0b7a25"} Jan 21 11:13:50 crc kubenswrapper[4925]: I0121 11:13:50.132147 4925 scope.go:117] "RemoveContainer" containerID="e3f868ed9651e50a998c56f421dcf313de9cb0d8cc843ecff23b89cfae066e06" Jan 21 11:13:51 crc kubenswrapper[4925]: I0121 11:13:51.143073 4925 generic.go:334] "Generic (PLEG): container finished" podID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerID="240f71cc57774b45159f54dd4adcdb251b3bcb90380860a05737185f21bb91c0" exitCode=0 Jan 21 11:13:51 crc kubenswrapper[4925]: I0121 11:13:51.143570 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" event={"ID":"b4b9e49f-2140-42de-b29f-6241bafc109e","Type":"ContainerDied","Data":"240f71cc57774b45159f54dd4adcdb251b3bcb90380860a05737185f21bb91c0"} Jan 21 11:13:51 crc kubenswrapper[4925]: I0121 11:13:51.158307 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerStarted","Data":"a817a6d7a1125710862b0950559e761e55ba7dc7aeab5dd7bffecb1e42232867"} Jan 21 11:13:51 crc kubenswrapper[4925]: I0121 11:13:51.158790 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerStarted","Data":"08c0624650ad8f34ad26b18d4fb07c81a4758205fa2aa05ae986f40397f844d3"} Jan 21 11:13:51 crc kubenswrapper[4925]: I0121 11:13:51.158811 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerStarted","Data":"a4e4d0ffb0b7703de177fe6d591090d4069aa8205302df9889886c8d936818ba"} Jan 21 11:13:51 crc kubenswrapper[4925]: I0121 11:13:51.158827 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerStarted","Data":"cd568d615b1f51e423c1d5997916d3457a43ae19daecc007ba00a9f836d5fdd5"} Jan 21 11:13:51 crc kubenswrapper[4925]: I0121 11:13:51.158840 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerStarted","Data":"3d16281bd816e99bb4ae7541d02ec601b0c89b030a491984edca0375e2c071b6"} Jan 21 11:13:51 crc kubenswrapper[4925]: I0121 11:13:51.173816 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" 
event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"67d412d76a3774c8b426878268b1816585378c0b05acfee3e5041ad5e7dbd93a"} Jan 21 11:13:52 crc kubenswrapper[4925]: I0121 11:13:52.192973 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7tz4m" event={"ID":"9449246f-d4a0-407f-8e9f-cb7271c90d72","Type":"ContainerStarted","Data":"a3323f07feda1531665cd7435164a347f83815e640824c32830251386bf12e67"} Jan 21 11:13:52 crc kubenswrapper[4925]: I0121 11:13:52.193631 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:52 crc kubenswrapper[4925]: I0121 11:13:52.230111 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-7tz4m" podStartSLOduration=5.527550966 podStartE2EDuration="19.230065195s" podCreationTimestamp="2026-01-21 11:13:33 +0000 UTC" firstStartedPulling="2026-01-21 11:13:33.77217003 +0000 UTC m=+1105.376061964" lastFinishedPulling="2026-01-21 11:13:47.474684249 +0000 UTC m=+1119.078576193" observedRunningTime="2026-01-21 11:13:52.223706343 +0000 UTC m=+1123.827598297" watchObservedRunningTime="2026-01-21 11:13:52.230065195 +0000 UTC m=+1123.833957129" Jan 21 11:13:53 crc kubenswrapper[4925]: I0121 11:13:53.603085 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:53 crc kubenswrapper[4925]: I0121 11:13:53.653736 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:13:58 crc kubenswrapper[4925]: I0121 11:13:58.334825 4925 generic.go:334] "Generic (PLEG): container finished" podID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerID="47e3d593ed52cfe322f25cb589c98554c4287358dda373175917dcdb9361a6ae" exitCode=0 Jan 21 11:13:58 crc kubenswrapper[4925]: I0121 11:13:58.334946 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" event={"ID":"b4b9e49f-2140-42de-b29f-6241bafc109e","Type":"ContainerDied","Data":"47e3d593ed52cfe322f25cb589c98554c4287358dda373175917dcdb9361a6ae"} Jan 21 11:13:59 crc kubenswrapper[4925]: I0121 11:13:59.348125 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" event={"ID":"b4b9e49f-2140-42de-b29f-6241bafc109e","Type":"ContainerStarted","Data":"01079afda587b472cabd6ff976522210f70aa91e1cdf65e9cdc529d62ccd862e"} Jan 21 11:14:00 crc kubenswrapper[4925]: I0121 11:14:00.356609 4925 generic.go:334] "Generic (PLEG): container finished" podID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerID="01079afda587b472cabd6ff976522210f70aa91e1cdf65e9cdc529d62ccd862e" exitCode=0 Jan 21 11:14:00 crc kubenswrapper[4925]: I0121 11:14:00.356662 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" event={"ID":"b4b9e49f-2140-42de-b29f-6241bafc109e","Type":"ContainerDied","Data":"01079afda587b472cabd6ff976522210f70aa91e1cdf65e9cdc529d62ccd862e"} Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.719518 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.894948 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmlkn\" (UniqueName: \"kubernetes.io/projected/b4b9e49f-2140-42de-b29f-6241bafc109e-kube-api-access-wmlkn\") pod \"b4b9e49f-2140-42de-b29f-6241bafc109e\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.895128 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-util\") pod \"b4b9e49f-2140-42de-b29f-6241bafc109e\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.895186 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-bundle\") pod \"b4b9e49f-2140-42de-b29f-6241bafc109e\" (UID: \"b4b9e49f-2140-42de-b29f-6241bafc109e\") " Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.896450 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-bundle" (OuterVolumeSpecName: "bundle") pod "b4b9e49f-2140-42de-b29f-6241bafc109e" (UID: "b4b9e49f-2140-42de-b29f-6241bafc109e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.903938 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4b9e49f-2140-42de-b29f-6241bafc109e-kube-api-access-wmlkn" (OuterVolumeSpecName: "kube-api-access-wmlkn") pod "b4b9e49f-2140-42de-b29f-6241bafc109e" (UID: "b4b9e49f-2140-42de-b29f-6241bafc109e"). InnerVolumeSpecName "kube-api-access-wmlkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.907764 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-util" (OuterVolumeSpecName: "util") pod "b4b9e49f-2140-42de-b29f-6241bafc109e" (UID: "b4b9e49f-2140-42de-b29f-6241bafc109e"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.997326 4925 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.997434 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmlkn\" (UniqueName: \"kubernetes.io/projected/b4b9e49f-2140-42de-b29f-6241bafc109e-kube-api-access-wmlkn\") on node \"crc\" DevicePath \"\"" Jan 21 11:14:01 crc kubenswrapper[4925]: I0121 11:14:01.997451 4925 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b4b9e49f-2140-42de-b29f-6241bafc109e-util\") on node \"crc\" DevicePath \"\"" Jan 21 11:14:02 crc kubenswrapper[4925]: I0121 11:14:02.373682 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" event={"ID":"b4b9e49f-2140-42de-b29f-6241bafc109e","Type":"ContainerDied","Data":"29da556f19fd4556624edc873f6aade15d539972eada65cfdf948233f064c4d3"} Jan 21 11:14:02 crc kubenswrapper[4925]: I0121 11:14:02.373733 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm" Jan 21 11:14:02 crc kubenswrapper[4925]: I0121 11:14:02.373736 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29da556f19fd4556624edc873f6aade15d539972eada65cfdf948233f064c4d3" Jan 21 11:14:03 crc kubenswrapper[4925]: I0121 11:14:03.710209 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9d94c" Jan 21 11:14:03 crc kubenswrapper[4925]: I0121 11:14:03.710995 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-7tz4m" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.725559 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl"] Jan 21 11:14:08 crc kubenswrapper[4925]: E0121 11:14:08.726078 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerName="util" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.726098 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerName="util" Jan 21 11:14:08 crc kubenswrapper[4925]: E0121 11:14:08.726112 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerName="extract" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.726118 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerName="extract" Jan 21 11:14:08 crc kubenswrapper[4925]: E0121 11:14:08.726137 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerName="pull" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.726150 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerName="pull" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.726346 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4b9e49f-2140-42de-b29f-6241bafc109e" containerName="extract" Jan 
21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.727108 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.732021 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.732223 4925 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-h9rvd" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.737706 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.743843 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl"] Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.900818 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2tnk\" (UniqueName: \"kubernetes.io/projected/c784a907-759a-4d6c-8c04-4254fa1f0b00-kube-api-access-r2tnk\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dc7jl\" (UID: \"c784a907-759a-4d6c-8c04-4254fa1f0b00\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" Jan 21 11:14:08 crc kubenswrapper[4925]: I0121 11:14:08.900968 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c784a907-759a-4d6c-8c04-4254fa1f0b00-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dc7jl\" (UID: \"c784a907-759a-4d6c-8c04-4254fa1f0b00\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" Jan 21 11:14:09 crc kubenswrapper[4925]: I0121 11:14:09.002867 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2tnk\" (UniqueName: \"kubernetes.io/projected/c784a907-759a-4d6c-8c04-4254fa1f0b00-kube-api-access-r2tnk\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dc7jl\" (UID: \"c784a907-759a-4d6c-8c04-4254fa1f0b00\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" Jan 21 11:14:09 crc kubenswrapper[4925]: I0121 11:14:09.002957 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c784a907-759a-4d6c-8c04-4254fa1f0b00-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dc7jl\" (UID: \"c784a907-759a-4d6c-8c04-4254fa1f0b00\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" Jan 21 11:14:09 crc kubenswrapper[4925]: I0121 11:14:09.003810 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c784a907-759a-4d6c-8c04-4254fa1f0b00-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dc7jl\" (UID: \"c784a907-759a-4d6c-8c04-4254fa1f0b00\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" Jan 21 11:14:09 crc kubenswrapper[4925]: I0121 11:14:09.043168 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2tnk\" (UniqueName: \"kubernetes.io/projected/c784a907-759a-4d6c-8c04-4254fa1f0b00-kube-api-access-r2tnk\") pod 
\"cert-manager-operator-controller-manager-64cf6dff88-dc7jl\" (UID: \"c784a907-759a-4d6c-8c04-4254fa1f0b00\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" Jan 21 11:14:09 crc kubenswrapper[4925]: I0121 11:14:09.052209 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" Jan 21 11:14:09 crc kubenswrapper[4925]: I0121 11:14:09.698125 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl"] Jan 21 11:14:10 crc kubenswrapper[4925]: I0121 11:14:10.673747 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" event={"ID":"c784a907-759a-4d6c-8c04-4254fa1f0b00","Type":"ContainerStarted","Data":"64b652932f8e0444560c468a57f5e1d6c7e3735b87214c73d4cc9b599f6937a9"} Jan 21 11:14:23 crc kubenswrapper[4925]: I0121 11:14:22.996448 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" event={"ID":"c784a907-759a-4d6c-8c04-4254fa1f0b00","Type":"ContainerStarted","Data":"99b3e3e9a2360e0a226eb6129dd05a55656b89f78e2bf7647dceb57e64497ac8"} Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.212122 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dc7jl" podStartSLOduration=6.674705426 podStartE2EDuration="19.212091872s" podCreationTimestamp="2026-01-21 11:14:08 +0000 UTC" firstStartedPulling="2026-01-21 11:14:09.739791857 +0000 UTC m=+1141.343683791" lastFinishedPulling="2026-01-21 11:14:22.277178303 +0000 UTC m=+1153.881070237" observedRunningTime="2026-01-21 11:14:23.17978759 +0000 UTC m=+1154.783679524" watchObservedRunningTime="2026-01-21 11:14:27.212091872 +0000 UTC m=+1158.815983806" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.218098 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-9vbqj"] Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.220103 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.222972 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.223483 4925 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-4h2p8" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.226207 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.244170 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-9vbqj"] Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.286506 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv66c\" (UniqueName: \"kubernetes.io/projected/d2372e44-af17-4c55-9a11-67fb28adcc08-kube-api-access-sv66c\") pod \"cert-manager-webhook-f4fb5df64-9vbqj\" (UID: \"d2372e44-af17-4c55-9a11-67fb28adcc08\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.286722 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d2372e44-af17-4c55-9a11-67fb28adcc08-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-9vbqj\" (UID: \"d2372e44-af17-4c55-9a11-67fb28adcc08\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.387941 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d2372e44-af17-4c55-9a11-67fb28adcc08-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-9vbqj\" (UID: \"d2372e44-af17-4c55-9a11-67fb28adcc08\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.388174 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv66c\" (UniqueName: \"kubernetes.io/projected/d2372e44-af17-4c55-9a11-67fb28adcc08-kube-api-access-sv66c\") pod \"cert-manager-webhook-f4fb5df64-9vbqj\" (UID: \"d2372e44-af17-4c55-9a11-67fb28adcc08\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.414073 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d2372e44-af17-4c55-9a11-67fb28adcc08-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-9vbqj\" (UID: \"d2372e44-af17-4c55-9a11-67fb28adcc08\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.414488 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sv66c\" (UniqueName: \"kubernetes.io/projected/d2372e44-af17-4c55-9a11-67fb28adcc08-kube-api-access-sv66c\") pod \"cert-manager-webhook-f4fb5df64-9vbqj\" (UID: \"d2372e44-af17-4c55-9a11-67fb28adcc08\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.554832 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:27 crc kubenswrapper[4925]: I0121 11:14:27.844144 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-9vbqj"] Jan 21 11:14:28 crc kubenswrapper[4925]: I0121 11:14:28.034861 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" event={"ID":"d2372e44-af17-4c55-9a11-67fb28adcc08","Type":"ContainerStarted","Data":"b06a016ba99ff3ab833071658aa38eedcfd955b1ec34d37af5ee1823dcd3ecbc"} Jan 21 11:14:29 crc kubenswrapper[4925]: I0121 11:14:29.762110 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg"] Jan 21 11:14:29 crc kubenswrapper[4925]: I0121 11:14:29.768967 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" Jan 21 11:14:29 crc kubenswrapper[4925]: I0121 11:14:29.772471 4925 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-f9zsp" Jan 21 11:14:29 crc kubenswrapper[4925]: I0121 11:14:29.792064 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg"] Jan 21 11:14:29 crc kubenswrapper[4925]: I0121 11:14:29.948668 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1ae434a0-174f-4c93-bf2c-ab2091c54e6c-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-gl7pg\" (UID: \"1ae434a0-174f-4c93-bf2c-ab2091c54e6c\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" Jan 21 11:14:29 crc kubenswrapper[4925]: I0121 11:14:29.949794 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9khtr\" (UniqueName: \"kubernetes.io/projected/1ae434a0-174f-4c93-bf2c-ab2091c54e6c-kube-api-access-9khtr\") pod \"cert-manager-cainjector-855d9ccff4-gl7pg\" (UID: \"1ae434a0-174f-4c93-bf2c-ab2091c54e6c\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" Jan 21 11:14:30 crc kubenswrapper[4925]: I0121 11:14:30.051994 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1ae434a0-174f-4c93-bf2c-ab2091c54e6c-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-gl7pg\" (UID: \"1ae434a0-174f-4c93-bf2c-ab2091c54e6c\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" Jan 21 11:14:30 crc kubenswrapper[4925]: I0121 11:14:30.052772 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9khtr\" (UniqueName: \"kubernetes.io/projected/1ae434a0-174f-4c93-bf2c-ab2091c54e6c-kube-api-access-9khtr\") pod \"cert-manager-cainjector-855d9ccff4-gl7pg\" (UID: \"1ae434a0-174f-4c93-bf2c-ab2091c54e6c\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" Jan 21 11:14:30 crc kubenswrapper[4925]: I0121 11:14:30.080225 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9khtr\" (UniqueName: \"kubernetes.io/projected/1ae434a0-174f-4c93-bf2c-ab2091c54e6c-kube-api-access-9khtr\") pod \"cert-manager-cainjector-855d9ccff4-gl7pg\" (UID: \"1ae434a0-174f-4c93-bf2c-ab2091c54e6c\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" Jan 21 11:14:30 crc kubenswrapper[4925]: I0121 11:14:30.091355 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1ae434a0-174f-4c93-bf2c-ab2091c54e6c-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-gl7pg\" (UID: \"1ae434a0-174f-4c93-bf2c-ab2091c54e6c\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" Jan 21 11:14:30 crc kubenswrapper[4925]: I0121 11:14:30.097381 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" Jan 21 11:14:30 crc kubenswrapper[4925]: I0121 11:14:30.621948 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg"] Jan 21 11:14:30 crc kubenswrapper[4925]: W0121 11:14:30.632196 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ae434a0_174f_4c93_bf2c_ab2091c54e6c.slice/crio-f1f62389efb8e70eb9d7dcf7ba0b845deb59d37ad07c6c7c6f6038e5765bec26 WatchSource:0}: Error finding container f1f62389efb8e70eb9d7dcf7ba0b845deb59d37ad07c6c7c6f6038e5765bec26: Status 404 returned error can't find the container with id f1f62389efb8e70eb9d7dcf7ba0b845deb59d37ad07c6c7c6f6038e5765bec26 Jan 21 11:14:31 crc kubenswrapper[4925]: I0121 11:14:31.142986 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" event={"ID":"1ae434a0-174f-4c93-bf2c-ab2091c54e6c","Type":"ContainerStarted","Data":"f1f62389efb8e70eb9d7dcf7ba0b845deb59d37ad07c6c7c6f6038e5765bec26"} Jan 21 11:14:44 crc kubenswrapper[4925]: E0121 11:14:44.379453 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:29a0fa1c2f2a6cee62a0468a3883d16d491b4af29130dad6e3e2bb2948f274df" Jan 21 11:14:44 crc kubenswrapper[4925]: E0121 11:14:44.384558 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cert-manager-webhook,Image:registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:29a0fa1c2f2a6cee62a0468a3883d16d491b4af29130dad6e3e2bb2948f274df,Command:[/app/cmd/webhook/webhook],Args:[--dynamic-serving-ca-secret-name=cert-manager-webhook-ca --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) --dynamic-serving-dns-names=cert-manager-webhook,cert-manager-webhook.$(POD_NAMESPACE),cert-manager-webhook.$(POD_NAMESPACE).svc --secure-port=10250 
Jan 21 11:14:44 crc kubenswrapper[4925]: E0121 11:14:44.387135 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" podUID="d2372e44-af17-4c55-9a11-67fb28adcc08"
Jan 21 11:14:45 crc kubenswrapper[4925]: I0121 11:14:45.710604 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" event={"ID":"d2372e44-af17-4c55-9a11-67fb28adcc08","Type":"ContainerStarted","Data":"411955bbbc77b484f1cbe4de89678c40e7f212594fb70cbde5747751e7740ecc"}
Jan 21 11:14:45 crc kubenswrapper[4925]: I0121 11:14:45.711650 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj"
Jan 21 11:14:45 crc kubenswrapper[4925]: I0121 11:14:45.713513 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" event={"ID":"1ae434a0-174f-4c93-bf2c-ab2091c54e6c","Type":"ContainerStarted","Data":"aca02ab4a09e0d4437351a2272053738961cc87a4b674311b838b7a0bb722413"}
Jan 21 11:14:45 crc kubenswrapper[4925]: I0121 11:14:45.733500 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" podStartSLOduration=-9223372018.121395 podStartE2EDuration="18.733381378s" podCreationTimestamp="2026-01-21 11:14:27 +0000 UTC" firstStartedPulling="2026-01-21 11:14:27.854408805 +0000 UTC m=+1159.458300739" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:14:45.729915659 +0000 UTC m=+1177.333807583" watchObservedRunningTime="2026-01-21 11:14:45.733381378 +0000 UTC m=+1177.337273322"
Jan 21 11:14:45 crc kubenswrapper[4925]: I0121 11:14:45.889239 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-gl7pg" podStartSLOduration=3.060034254 podStartE2EDuration="16.889198721s" podCreationTimestamp="2026-01-21 11:14:29 +0000 UTC" firstStartedPulling="2026-01-21 11:14:30.636719811 +0000 UTC m=+1162.240611745" lastFinishedPulling="2026-01-21 11:14:44.465884278 +0000 UTC m=+1176.069776212" observedRunningTime="2026-01-21 11:14:45.884547713 +0000 UTC m=+1177.488439647" watchObservedRunningTime="2026-01-21 11:14:45.889198721 +0000 UTC m=+1177.493090655"
Jan 21 11:14:46 crc kubenswrapper[4925]: I0121 11:14:46.816135 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-946sh"]
Jan 21 11:14:46 crc kubenswrapper[4925]: I0121 11:14:46.817695 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-946sh"
Jan 21 11:14:46 crc kubenswrapper[4925]: I0121 11:14:46.819737 4925 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-jbpnn"
Jan 21 11:14:46 crc kubenswrapper[4925]: I0121 11:14:46.832286 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-946sh"]
Jan 21 11:14:46 crc kubenswrapper[4925]: I0121 11:14:46.894088 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d5dc9762-e122-475f-a1a2-2d9711313716-bound-sa-token\") pod \"cert-manager-86cb77c54b-946sh\" (UID: \"d5dc9762-e122-475f-a1a2-2d9711313716\") " pod="cert-manager/cert-manager-86cb77c54b-946sh"
Jan 21 11:14:46 crc kubenswrapper[4925]: I0121 11:14:46.894160 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5fbz\" (UniqueName: \"kubernetes.io/projected/d5dc9762-e122-475f-a1a2-2d9711313716-kube-api-access-c5fbz\") pod \"cert-manager-86cb77c54b-946sh\" (UID: \"d5dc9762-e122-475f-a1a2-2d9711313716\") " pod="cert-manager/cert-manager-86cb77c54b-946sh"
Jan 21 11:14:46 crc kubenswrapper[4925]: I0121 11:14:46.995662 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d5dc9762-e122-475f-a1a2-2d9711313716-bound-sa-token\") pod \"cert-manager-86cb77c54b-946sh\" (UID: \"d5dc9762-e122-475f-a1a2-2d9711313716\") " pod="cert-manager/cert-manager-86cb77c54b-946sh"
Jan 21 11:14:46 crc kubenswrapper[4925]: I0121 11:14:46.995766 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5fbz\" (UniqueName: \"kubernetes.io/projected/d5dc9762-e122-475f-a1a2-2d9711313716-kube-api-access-c5fbz\") pod \"cert-manager-86cb77c54b-946sh\" (UID: \"d5dc9762-e122-475f-a1a2-2d9711313716\") " pod="cert-manager/cert-manager-86cb77c54b-946sh"
\"kube-api-access-c5fbz\" (UniqueName: \"kubernetes.io/projected/d5dc9762-e122-475f-a1a2-2d9711313716-kube-api-access-c5fbz\") pod \"cert-manager-86cb77c54b-946sh\" (UID: \"d5dc9762-e122-475f-a1a2-2d9711313716\") " pod="cert-manager/cert-manager-86cb77c54b-946sh" Jan 21 11:14:47 crc kubenswrapper[4925]: I0121 11:14:47.036603 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d5dc9762-e122-475f-a1a2-2d9711313716-bound-sa-token\") pod \"cert-manager-86cb77c54b-946sh\" (UID: \"d5dc9762-e122-475f-a1a2-2d9711313716\") " pod="cert-manager/cert-manager-86cb77c54b-946sh" Jan 21 11:14:47 crc kubenswrapper[4925]: I0121 11:14:47.036862 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5fbz\" (UniqueName: \"kubernetes.io/projected/d5dc9762-e122-475f-a1a2-2d9711313716-kube-api-access-c5fbz\") pod \"cert-manager-86cb77c54b-946sh\" (UID: \"d5dc9762-e122-475f-a1a2-2d9711313716\") " pod="cert-manager/cert-manager-86cb77c54b-946sh" Jan 21 11:14:47 crc kubenswrapper[4925]: I0121 11:14:47.139406 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-946sh" Jan 21 11:14:47 crc kubenswrapper[4925]: I0121 11:14:47.710900 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-946sh"] Jan 21 11:14:47 crc kubenswrapper[4925]: W0121 11:14:47.718052 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5dc9762_e122_475f_a1a2_2d9711313716.slice/crio-87a6e7bcd43254a308fd31e5c776093843eb913cb605aa3771b8599bde5da447 WatchSource:0}: Error finding container 87a6e7bcd43254a308fd31e5c776093843eb913cb605aa3771b8599bde5da447: Status 404 returned error can't find the container with id 87a6e7bcd43254a308fd31e5c776093843eb913cb605aa3771b8599bde5da447 Jan 21 11:14:47 crc kubenswrapper[4925]: I0121 11:14:47.745380 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-946sh" event={"ID":"d5dc9762-e122-475f-a1a2-2d9711313716","Type":"ContainerStarted","Data":"87a6e7bcd43254a308fd31e5c776093843eb913cb605aa3771b8599bde5da447"} Jan 21 11:14:52 crc kubenswrapper[4925]: I0121 11:14:52.558649 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-9vbqj" Jan 21 11:14:52 crc kubenswrapper[4925]: I0121 11:14:52.786792 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-946sh" event={"ID":"d5dc9762-e122-475f-a1a2-2d9711313716","Type":"ContainerStarted","Data":"230151df336f1827139e8e9d1a2ea52678e47b7815775dae15561bdf0f0c6005"} Jan 21 11:14:53 crc kubenswrapper[4925]: I0121 11:14:53.820735 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-946sh" podStartSLOduration=7.820709843 podStartE2EDuration="7.820709843s" podCreationTimestamp="2026-01-21 11:14:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:14:53.818944426 +0000 UTC m=+1185.422836360" watchObservedRunningTime="2026-01-21 11:14:53.820709843 +0000 UTC m=+1185.424601787" Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.123326 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-q67z4"] Jan 21 11:14:57 crc 
kubenswrapper[4925]: I0121 11:14:57.138849 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-q67z4" Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.150469 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.150740 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-krxf6" Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.150746 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.174673 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-q67z4"] Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.273609 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v2cq\" (UniqueName: \"kubernetes.io/projected/ecf1dd39-0308-4582-892f-6fed0ad7b871-kube-api-access-5v2cq\") pod \"openstack-operator-index-q67z4\" (UID: \"ecf1dd39-0308-4582-892f-6fed0ad7b871\") " pod="openstack-operators/openstack-operator-index-q67z4" Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.375061 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v2cq\" (UniqueName: \"kubernetes.io/projected/ecf1dd39-0308-4582-892f-6fed0ad7b871-kube-api-access-5v2cq\") pod \"openstack-operator-index-q67z4\" (UID: \"ecf1dd39-0308-4582-892f-6fed0ad7b871\") " pod="openstack-operators/openstack-operator-index-q67z4" Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.399888 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v2cq\" (UniqueName: \"kubernetes.io/projected/ecf1dd39-0308-4582-892f-6fed0ad7b871-kube-api-access-5v2cq\") pod \"openstack-operator-index-q67z4\" (UID: \"ecf1dd39-0308-4582-892f-6fed0ad7b871\") " pod="openstack-operators/openstack-operator-index-q67z4" Jan 21 11:14:57 crc kubenswrapper[4925]: I0121 11:14:57.673929 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-q67z4" Jan 21 11:14:58 crc kubenswrapper[4925]: I0121 11:14:58.167539 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-q67z4"] Jan 21 11:14:58 crc kubenswrapper[4925]: W0121 11:14:58.174490 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podecf1dd39_0308_4582_892f_6fed0ad7b871.slice/crio-ccfa2a236018880b5cbc098c6bdcaf5f523ff3dd84611c4c7d9cf018768cb8eb WatchSource:0}: Error finding container ccfa2a236018880b5cbc098c6bdcaf5f523ff3dd84611c4c7d9cf018768cb8eb: Status 404 returned error can't find the container with id ccfa2a236018880b5cbc098c6bdcaf5f523ff3dd84611c4c7d9cf018768cb8eb Jan 21 11:14:59 crc kubenswrapper[4925]: I0121 11:14:59.017969 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-q67z4" event={"ID":"ecf1dd39-0308-4582-892f-6fed0ad7b871","Type":"ContainerStarted","Data":"ccfa2a236018880b5cbc098c6bdcaf5f523ff3dd84611c4c7d9cf018768cb8eb"} Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.160696 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk"] Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.162479 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.169151 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.169424 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.170281 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk"] Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.237945 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6b514e2-7001-49cd-8e45-7a3333c7d25a-secret-volume\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.238000 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6b514e2-7001-49cd-8e45-7a3333c7d25a-config-volume\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.238041 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpp4j\" (UniqueName: \"kubernetes.io/projected/f6b514e2-7001-49cd-8e45-7a3333c7d25a-kube-api-access-hpp4j\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.298484 4925 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack-operators/openstack-operator-index-q67z4"] Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.339927 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpp4j\" (UniqueName: \"kubernetes.io/projected/f6b514e2-7001-49cd-8e45-7a3333c7d25a-kube-api-access-hpp4j\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.340110 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6b514e2-7001-49cd-8e45-7a3333c7d25a-secret-volume\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.340157 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6b514e2-7001-49cd-8e45-7a3333c7d25a-config-volume\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.342440 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6b514e2-7001-49cd-8e45-7a3333c7d25a-config-volume\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.355041 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6b514e2-7001-49cd-8e45-7a3333c7d25a-secret-volume\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.359423 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpp4j\" (UniqueName: \"kubernetes.io/projected/f6b514e2-7001-49cd-8e45-7a3333c7d25a-kube-api-access-hpp4j\") pod \"collect-profiles-29483235-qndhk\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.491138 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.905624 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-6jmrz"] Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.906829 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:00 crc kubenswrapper[4925]: I0121 11:15:00.926181 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6jmrz"] Jan 21 11:15:01 crc kubenswrapper[4925]: I0121 11:15:01.055570 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rhlp\" (UniqueName: \"kubernetes.io/projected/25cc6d46-b21a-463f-a13a-9874780c87f3-kube-api-access-2rhlp\") pod \"openstack-operator-index-6jmrz\" (UID: \"25cc6d46-b21a-463f-a13a-9874780c87f3\") " pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:01 crc kubenswrapper[4925]: I0121 11:15:01.156994 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rhlp\" (UniqueName: \"kubernetes.io/projected/25cc6d46-b21a-463f-a13a-9874780c87f3-kube-api-access-2rhlp\") pod \"openstack-operator-index-6jmrz\" (UID: \"25cc6d46-b21a-463f-a13a-9874780c87f3\") " pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:01 crc kubenswrapper[4925]: I0121 11:15:01.198262 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rhlp\" (UniqueName: \"kubernetes.io/projected/25cc6d46-b21a-463f-a13a-9874780c87f3-kube-api-access-2rhlp\") pod \"openstack-operator-index-6jmrz\" (UID: \"25cc6d46-b21a-463f-a13a-9874780c87f3\") " pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:01 crc kubenswrapper[4925]: I0121 11:15:01.229298 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:02 crc kubenswrapper[4925]: I0121 11:15:02.384789 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6jmrz"] Jan 21 11:15:02 crc kubenswrapper[4925]: I0121 11:15:02.439740 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk"] Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.054364 4925 generic.go:334] "Generic (PLEG): container finished" podID="f6b514e2-7001-49cd-8e45-7a3333c7d25a" containerID="bc98af7d83ac652093940b525ffa759385bec47c7a3952b84cc66c8fd5f9577f" exitCode=0 Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.054481 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" event={"ID":"f6b514e2-7001-49cd-8e45-7a3333c7d25a","Type":"ContainerDied","Data":"bc98af7d83ac652093940b525ffa759385bec47c7a3952b84cc66c8fd5f9577f"} Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.054954 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" event={"ID":"f6b514e2-7001-49cd-8e45-7a3333c7d25a","Type":"ContainerStarted","Data":"3be9e51452b12778ee5a177192dbffde94ae9ae6e4a357c904226468ca7f426c"} Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.056417 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-q67z4" event={"ID":"ecf1dd39-0308-4582-892f-6fed0ad7b871","Type":"ContainerStarted","Data":"34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01"} Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.056523 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-q67z4" 
podUID="ecf1dd39-0308-4582-892f-6fed0ad7b871" containerName="registry-server" containerID="cri-o://34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01" gracePeriod=2 Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.059031 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6jmrz" event={"ID":"25cc6d46-b21a-463f-a13a-9874780c87f3","Type":"ContainerStarted","Data":"07c07adb793d3985ddf30c703305eda9698efc1cadc97cfebb12141a942ab1d7"} Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.059073 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6jmrz" event={"ID":"25cc6d46-b21a-463f-a13a-9874780c87f3","Type":"ContainerStarted","Data":"0c476769bb7feea192b162d13fc9dc4541fb5894430fe99da3706e8860b16683"} Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.090445 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-6jmrz" podStartSLOduration=2.971323499 podStartE2EDuration="3.090419147s" podCreationTimestamp="2026-01-21 11:15:00 +0000 UTC" firstStartedPulling="2026-01-21 11:15:02.444057257 +0000 UTC m=+1194.047949191" lastFinishedPulling="2026-01-21 11:15:02.563152895 +0000 UTC m=+1194.167044839" observedRunningTime="2026-01-21 11:15:03.086278266 +0000 UTC m=+1194.690170210" watchObservedRunningTime="2026-01-21 11:15:03.090419147 +0000 UTC m=+1194.694311241" Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.108373 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-q67z4" podStartSLOduration=1.830657502 podStartE2EDuration="6.108343076s" podCreationTimestamp="2026-01-21 11:14:57 +0000 UTC" firstStartedPulling="2026-01-21 11:14:58.177545757 +0000 UTC m=+1189.781437691" lastFinishedPulling="2026-01-21 11:15:02.455231331 +0000 UTC m=+1194.059123265" observedRunningTime="2026-01-21 11:15:03.102029505 +0000 UTC m=+1194.705921459" watchObservedRunningTime="2026-01-21 11:15:03.108343076 +0000 UTC m=+1194.712235010" Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.570324 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-q67z4" Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.596260 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5v2cq\" (UniqueName: \"kubernetes.io/projected/ecf1dd39-0308-4582-892f-6fed0ad7b871-kube-api-access-5v2cq\") pod \"ecf1dd39-0308-4582-892f-6fed0ad7b871\" (UID: \"ecf1dd39-0308-4582-892f-6fed0ad7b871\") " Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.604183 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecf1dd39-0308-4582-892f-6fed0ad7b871-kube-api-access-5v2cq" (OuterVolumeSpecName: "kube-api-access-5v2cq") pod "ecf1dd39-0308-4582-892f-6fed0ad7b871" (UID: "ecf1dd39-0308-4582-892f-6fed0ad7b871"). InnerVolumeSpecName "kube-api-access-5v2cq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:15:03 crc kubenswrapper[4925]: I0121 11:15:03.698577 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5v2cq\" (UniqueName: \"kubernetes.io/projected/ecf1dd39-0308-4582-892f-6fed0ad7b871-kube-api-access-5v2cq\") on node \"crc\" DevicePath \"\"" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.069870 4925 generic.go:334] "Generic (PLEG): container finished" podID="ecf1dd39-0308-4582-892f-6fed0ad7b871" containerID="34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01" exitCode=0 Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.070122 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-q67z4" event={"ID":"ecf1dd39-0308-4582-892f-6fed0ad7b871","Type":"ContainerDied","Data":"34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01"} Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.070134 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-q67z4" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.070627 4925 scope.go:117] "RemoveContainer" containerID="34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.070609 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-q67z4" event={"ID":"ecf1dd39-0308-4582-892f-6fed0ad7b871","Type":"ContainerDied","Data":"ccfa2a236018880b5cbc098c6bdcaf5f523ff3dd84611c4c7d9cf018768cb8eb"} Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.102809 4925 scope.go:117] "RemoveContainer" containerID="34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01" Jan 21 11:15:04 crc kubenswrapper[4925]: E0121 11:15:04.104805 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01\": container with ID starting with 34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01 not found: ID does not exist" containerID="34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.104857 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01"} err="failed to get container status \"34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01\": rpc error: code = NotFound desc = could not find container \"34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01\": container with ID starting with 34534d61ce0e53b635544bcf8779d8376323320b0c4b06b184934200fd44ff01 not found: ID does not exist" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.108728 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-q67z4"] Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.116280 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-q67z4"] Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.517191 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.716909 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6b514e2-7001-49cd-8e45-7a3333c7d25a-config-volume\") pod \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.717020 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6b514e2-7001-49cd-8e45-7a3333c7d25a-secret-volume\") pod \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.717098 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpp4j\" (UniqueName: \"kubernetes.io/projected/f6b514e2-7001-49cd-8e45-7a3333c7d25a-kube-api-access-hpp4j\") pod \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\" (UID: \"f6b514e2-7001-49cd-8e45-7a3333c7d25a\") " Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.718092 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6b514e2-7001-49cd-8e45-7a3333c7d25a-config-volume" (OuterVolumeSpecName: "config-volume") pod "f6b514e2-7001-49cd-8e45-7a3333c7d25a" (UID: "f6b514e2-7001-49cd-8e45-7a3333c7d25a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.723317 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6b514e2-7001-49cd-8e45-7a3333c7d25a-kube-api-access-hpp4j" (OuterVolumeSpecName: "kube-api-access-hpp4j") pod "f6b514e2-7001-49cd-8e45-7a3333c7d25a" (UID: "f6b514e2-7001-49cd-8e45-7a3333c7d25a"). InnerVolumeSpecName "kube-api-access-hpp4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.728516 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b514e2-7001-49cd-8e45-7a3333c7d25a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f6b514e2-7001-49cd-8e45-7a3333c7d25a" (UID: "f6b514e2-7001-49cd-8e45-7a3333c7d25a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.818596 4925 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6b514e2-7001-49cd-8e45-7a3333c7d25a-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.818942 4925 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6b514e2-7001-49cd-8e45-7a3333c7d25a-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:15:04 crc kubenswrapper[4925]: I0121 11:15:04.819062 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpp4j\" (UniqueName: \"kubernetes.io/projected/f6b514e2-7001-49cd-8e45-7a3333c7d25a-kube-api-access-hpp4j\") on node \"crc\" DevicePath \"\"" Jan 21 11:15:05 crc kubenswrapper[4925]: I0121 11:15:05.080790 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" event={"ID":"f6b514e2-7001-49cd-8e45-7a3333c7d25a","Type":"ContainerDied","Data":"3be9e51452b12778ee5a177192dbffde94ae9ae6e4a357c904226468ca7f426c"} Jan 21 11:15:05 crc kubenswrapper[4925]: I0121 11:15:05.081715 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3be9e51452b12778ee5a177192dbffde94ae9ae6e4a357c904226468ca7f426c" Jan 21 11:15:05 crc kubenswrapper[4925]: I0121 11:15:05.080860 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk" Jan 21 11:15:05 crc kubenswrapper[4925]: I0121 11:15:05.627949 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecf1dd39-0308-4582-892f-6fed0ad7b871" path="/var/lib/kubelet/pods/ecf1dd39-0308-4582-892f-6fed0ad7b871/volumes" Jan 21 11:15:11 crc kubenswrapper[4925]: I0121 11:15:11.230166 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:11 crc kubenswrapper[4925]: I0121 11:15:11.231106 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:11 crc kubenswrapper[4925]: I0121 11:15:11.265970 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:12 crc kubenswrapper[4925]: I0121 11:15:12.159246 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-6jmrz" Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.704798 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"] Jan 21 11:15:16 crc kubenswrapper[4925]: E0121 11:15:16.705789 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b514e2-7001-49cd-8e45-7a3333c7d25a" containerName="collect-profiles" Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.705806 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b514e2-7001-49cd-8e45-7a3333c7d25a" containerName="collect-profiles" Jan 21 11:15:16 crc kubenswrapper[4925]: E0121 11:15:16.705842 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecf1dd39-0308-4582-892f-6fed0ad7b871" containerName="registry-server" Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.705849 4925 
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.705965 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6b514e2-7001-49cd-8e45-7a3333c7d25a" containerName="collect-profiles"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.705979 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecf1dd39-0308-4582-892f-6fed0ad7b871" containerName="registry-server"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.707810 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.713273 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-6vm2t"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.718850 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"]
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.808077 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7tfx\" (UniqueName: \"kubernetes.io/projected/06c4add2-f00d-4aea-8168-f165cdf2b7cf-kube-api-access-m7tfx\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.808345 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-util\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.808518 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-bundle\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.909588 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-util\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.909973 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-bundle\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.910099 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7tfx\" (UniqueName: \"kubernetes.io/projected/06c4add2-f00d-4aea-8168-f165cdf2b7cf-kube-api-access-m7tfx\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.911000 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-util\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.911228 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-bundle\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:16 crc kubenswrapper[4925]: I0121 11:15:16.931173 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7tfx\" (UniqueName: \"kubernetes.io/projected/06c4add2-f00d-4aea-8168-f165cdf2b7cf-kube-api-access-m7tfx\") pod \"23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Jan 21 11:15:17 crc kubenswrapper[4925]: I0121 11:15:17.045499 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"
Need to start a new one" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75" Jan 21 11:15:17 crc kubenswrapper[4925]: I0121 11:15:17.467222 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75"] Jan 21 11:15:18 crc kubenswrapper[4925]: I0121 11:15:18.178860 4925 generic.go:334] "Generic (PLEG): container finished" podID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerID="f08e117c717a63b4682cee570c58bc203ec2672af0ef18e9e6117c91e746ab96" exitCode=0 Jan 21 11:15:18 crc kubenswrapper[4925]: I0121 11:15:18.178918 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75" event={"ID":"06c4add2-f00d-4aea-8168-f165cdf2b7cf","Type":"ContainerDied","Data":"f08e117c717a63b4682cee570c58bc203ec2672af0ef18e9e6117c91e746ab96"} Jan 21 11:15:18 crc kubenswrapper[4925]: I0121 11:15:18.178996 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75" event={"ID":"06c4add2-f00d-4aea-8168-f165cdf2b7cf","Type":"ContainerStarted","Data":"6128c1f49df1dd27af1db5649f4a3a602bef00d9edf318c9097b6b8d05522a0c"} Jan 21 11:15:18 crc kubenswrapper[4925]: I0121 11:15:18.180899 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 11:15:20 crc kubenswrapper[4925]: I0121 11:15:20.199946 4925 generic.go:334] "Generic (PLEG): container finished" podID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerID="c623be70895f495a56c4c7df920d0880d9c379e3e6133c809cc2a4b8d88113e8" exitCode=0 Jan 21 11:15:20 crc kubenswrapper[4925]: I0121 11:15:20.200031 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75" event={"ID":"06c4add2-f00d-4aea-8168-f165cdf2b7cf","Type":"ContainerDied","Data":"c623be70895f495a56c4c7df920d0880d9c379e3e6133c809cc2a4b8d88113e8"} Jan 21 11:15:21 crc kubenswrapper[4925]: I0121 11:15:21.209980 4925 generic.go:334] "Generic (PLEG): container finished" podID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerID="7e1880e93de6f0e0a94b0a72a809aed6d507875904a1392b9142dfa7193f8c89" exitCode=0 Jan 21 11:15:21 crc kubenswrapper[4925]: I0121 11:15:21.210049 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75" event={"ID":"06c4add2-f00d-4aea-8168-f165cdf2b7cf","Type":"ContainerDied","Data":"7e1880e93de6f0e0a94b0a72a809aed6d507875904a1392b9142dfa7193f8c89"} Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.495412 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75" Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.565952 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-util\") pod \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.566103 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7tfx\" (UniqueName: \"kubernetes.io/projected/06c4add2-f00d-4aea-8168-f165cdf2b7cf-kube-api-access-m7tfx\") pod \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.566495 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-bundle\") pod \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\" (UID: \"06c4add2-f00d-4aea-8168-f165cdf2b7cf\") " Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.567738 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-bundle" (OuterVolumeSpecName: "bundle") pod "06c4add2-f00d-4aea-8168-f165cdf2b7cf" (UID: "06c4add2-f00d-4aea-8168-f165cdf2b7cf"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.573338 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06c4add2-f00d-4aea-8168-f165cdf2b7cf-kube-api-access-m7tfx" (OuterVolumeSpecName: "kube-api-access-m7tfx") pod "06c4add2-f00d-4aea-8168-f165cdf2b7cf" (UID: "06c4add2-f00d-4aea-8168-f165cdf2b7cf"). InnerVolumeSpecName "kube-api-access-m7tfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.580489 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-util" (OuterVolumeSpecName: "util") pod "06c4add2-f00d-4aea-8168-f165cdf2b7cf" (UID: "06c4add2-f00d-4aea-8168-f165cdf2b7cf"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.669752 4925 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-util\") on node \"crc\" DevicePath \"\"" Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.669823 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7tfx\" (UniqueName: \"kubernetes.io/projected/06c4add2-f00d-4aea-8168-f165cdf2b7cf-kube-api-access-m7tfx\") on node \"crc\" DevicePath \"\"" Jan 21 11:15:22 crc kubenswrapper[4925]: I0121 11:15:22.669878 4925 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/06c4add2-f00d-4aea-8168-f165cdf2b7cf-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:15:23 crc kubenswrapper[4925]: I0121 11:15:23.227026 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75" event={"ID":"06c4add2-f00d-4aea-8168-f165cdf2b7cf","Type":"ContainerDied","Data":"6128c1f49df1dd27af1db5649f4a3a602bef00d9edf318c9097b6b8d05522a0c"} Jan 21 11:15:23 crc kubenswrapper[4925]: I0121 11:15:23.227080 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6128c1f49df1dd27af1db5649f4a3a602bef00d9edf318c9097b6b8d05522a0c" Jan 21 11:15:23 crc kubenswrapper[4925]: I0121 11:15:23.227089 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.603619 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4"] Jan 21 11:15:30 crc kubenswrapper[4925]: E0121 11:15:30.604667 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerName="pull" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.604690 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerName="pull" Jan 21 11:15:30 crc kubenswrapper[4925]: E0121 11:15:30.604716 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerName="extract" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.604723 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerName="extract" Jan 21 11:15:30 crc kubenswrapper[4925]: E0121 11:15:30.604740 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerName="util" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.604746 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerName="util" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.604863 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="06c4add2-f00d-4aea-8168-f165cdf2b7cf" containerName="extract" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.605408 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.609266 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-57wjq" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.672165 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4"] Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.741530 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9znj\" (UniqueName: \"kubernetes.io/projected/36235ffd-d8a9-4e8f-91ef-8c989efca81a-kube-api-access-g9znj\") pod \"openstack-operator-controller-init-766b56994f-nkxz4\" (UID: \"36235ffd-d8a9-4e8f-91ef-8c989efca81a\") " pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.844002 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9znj\" (UniqueName: \"kubernetes.io/projected/36235ffd-d8a9-4e8f-91ef-8c989efca81a-kube-api-access-g9znj\") pod \"openstack-operator-controller-init-766b56994f-nkxz4\" (UID: \"36235ffd-d8a9-4e8f-91ef-8c989efca81a\") " pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.871927 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9znj\" (UniqueName: \"kubernetes.io/projected/36235ffd-d8a9-4e8f-91ef-8c989efca81a-kube-api-access-g9znj\") pod \"openstack-operator-controller-init-766b56994f-nkxz4\" (UID: \"36235ffd-d8a9-4e8f-91ef-8c989efca81a\") " pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:15:30 crc kubenswrapper[4925]: I0121 11:15:30.928383 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:15:31 crc kubenswrapper[4925]: I0121 11:15:31.211802 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4"] Jan 21 11:15:31 crc kubenswrapper[4925]: I0121 11:15:31.290715 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" event={"ID":"36235ffd-d8a9-4e8f-91ef-8c989efca81a","Type":"ContainerStarted","Data":"abb4d0bd266ab404f97135e3837734fadc4a56fb23355e7395832cad7af2e43b"} Jan 21 11:15:36 crc kubenswrapper[4925]: I0121 11:15:36.446281 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" event={"ID":"36235ffd-d8a9-4e8f-91ef-8c989efca81a","Type":"ContainerStarted","Data":"b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454"} Jan 21 11:15:36 crc kubenswrapper[4925]: I0121 11:15:36.446948 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:15:36 crc kubenswrapper[4925]: I0121 11:15:36.506470 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" podStartSLOduration=1.666836939 podStartE2EDuration="6.506439385s" podCreationTimestamp="2026-01-21 11:15:30 +0000 UTC" firstStartedPulling="2026-01-21 11:15:31.213705347 +0000 UTC m=+1222.817597281" lastFinishedPulling="2026-01-21 11:15:36.053307793 +0000 UTC m=+1227.657199727" observedRunningTime="2026-01-21 11:15:36.484073546 +0000 UTC m=+1228.087965490" watchObservedRunningTime="2026-01-21 11:15:36.506439385 +0000 UTC m=+1228.110331319" Jan 21 11:15:50 crc kubenswrapper[4925]: I0121 11:15:50.932642 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.676552 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.677975 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.680111 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-gn2r2" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.701079 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.706058 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.707355 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.709116 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-bp225" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.729976 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.731117 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.734115 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-ff8nl" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.738909 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.752028 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.752909 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.757003 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-gdpc6" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.764331 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7qx7\" (UniqueName: \"kubernetes.io/projected/d8031329-a6ad-49da-881e-94db9f545ab7-kube-api-access-q7qx7\") pod \"barbican-operator-controller-manager-7ddb5c749-r4klh\" (UID: \"d8031329-a6ad-49da-881e-94db9f545ab7\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.769309 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.770722 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.776858 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-qxr6d" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.787444 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.812360 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.830798 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.839297 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.840713 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.854873 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-bwpz4" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.869338 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c97xx\" (UniqueName: \"kubernetes.io/projected/9b9f5cfa-93e1-4940-b7f0-066c6bc4f194-kube-api-access-c97xx\") pod \"heat-operator-controller-manager-594c8c9d5d-h9szq\" (UID: \"9b9f5cfa-93e1-4940-b7f0-066c6bc4f194\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.869386 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.869435 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdb8c\" (UniqueName: \"kubernetes.io/projected/cf77bf31-5d25-4015-b274-05dbedbedf5a-kube-api-access-zdb8c\") pod \"cinder-operator-controller-manager-9b68f5989-vmg65\" (UID: \"cf77bf31-5d25-4015-b274-05dbedbedf5a\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.869469 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfqv6\" (UniqueName: \"kubernetes.io/projected/e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23-kube-api-access-zfqv6\") pod \"glance-operator-controller-manager-c6994669c-ggpw9\" (UID: \"e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.869496 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7qx7\" (UniqueName: \"kubernetes.io/projected/d8031329-a6ad-49da-881e-94db9f545ab7-kube-api-access-q7qx7\") pod \"barbican-operator-controller-manager-7ddb5c749-r4klh\" (UID: \"d8031329-a6ad-49da-881e-94db9f545ab7\") " 
pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.869592 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqd72\" (UniqueName: \"kubernetes.io/projected/50c322c0-a941-48fa-bf86-c2daa64a9aa8-kube-api-access-zqd72\") pod \"designate-operator-controller-manager-9f958b845-mjg4d\" (UID: \"50c322c0-a941-48fa-bf86-c2daa64a9aa8\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.870319 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.881338 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.883792 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-4g2l9" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.892438 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.893817 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.896618 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-44dr6" Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.904125 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl"] Jan 21 11:16:10 crc kubenswrapper[4925]: I0121 11:16:10.905541 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.913906 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.915419 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-8fjnk" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.950140 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7qx7\" (UniqueName: \"kubernetes.io/projected/d8031329-a6ad-49da-881e-94db9f545ab7-kube-api-access-q7qx7\") pod \"barbican-operator-controller-manager-7ddb5c749-r4klh\" (UID: \"d8031329-a6ad-49da-881e-94db9f545ab7\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.995339 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c97xx\" (UniqueName: \"kubernetes.io/projected/9b9f5cfa-93e1-4940-b7f0-066c6bc4f194-kube-api-access-c97xx\") pod \"heat-operator-controller-manager-594c8c9d5d-h9szq\" (UID: \"9b9f5cfa-93e1-4940-b7f0-066c6bc4f194\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.995463 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdb8c\" (UniqueName: \"kubernetes.io/projected/cf77bf31-5d25-4015-b274-05dbedbedf5a-kube-api-access-zdb8c\") pod \"cinder-operator-controller-manager-9b68f5989-vmg65\" (UID: \"cf77bf31-5d25-4015-b274-05dbedbedf5a\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.995509 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfqv6\" (UniqueName: \"kubernetes.io/projected/e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23-kube-api-access-zfqv6\") pod \"glance-operator-controller-manager-c6994669c-ggpw9\" (UID: \"e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.995595 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-528nq\" (UniqueName: \"kubernetes.io/projected/2c47ce4c-9012-4798-9bf8-127a96ad285e-kube-api-access-528nq\") pod \"keystone-operator-controller-manager-767fdc4f47-2znsh\" (UID: \"2c47ce4c-9012-4798-9bf8-127a96ad285e\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.995659 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zqnl\" (UniqueName: \"kubernetes.io/projected/c4be49a0-e872-456f-a102-928f5210524f-kube-api-access-2zqnl\") pod \"horizon-operator-controller-manager-77d5c5b54f-gcxp4\" (UID: \"c4be49a0-e872-456f-a102-928f5210524f\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.995768 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqd72\" (UniqueName: 
\"kubernetes.io/projected/50c322c0-a941-48fa-bf86-c2daa64a9aa8-kube-api-access-zqd72\") pod \"designate-operator-controller-manager-9f958b845-mjg4d\" (UID: \"50c322c0-a941-48fa-bf86-c2daa64a9aa8\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.995822 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-852w5\" (UniqueName: \"kubernetes.io/projected/dbe9a043-a969-429b-b7b1-33d12296c52c-kube-api-access-852w5\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:10.995877 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.031875 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.075386 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.098728 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zqnl\" (UniqueName: \"kubernetes.io/projected/c4be49a0-e872-456f-a102-928f5210524f-kube-api-access-2zqnl\") pod \"horizon-operator-controller-manager-77d5c5b54f-gcxp4\" (UID: \"c4be49a0-e872-456f-a102-928f5210524f\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.098833 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-852w5\" (UniqueName: \"kubernetes.io/projected/dbe9a043-a969-429b-b7b1-33d12296c52c-kube-api-access-852w5\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.098885 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.098971 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-528nq\" (UniqueName: \"kubernetes.io/projected/2c47ce4c-9012-4798-9bf8-127a96ad285e-kube-api-access-528nq\") pod \"keystone-operator-controller-manager-767fdc4f47-2znsh\" (UID: \"2c47ce4c-9012-4798-9bf8-127a96ad285e\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.100152 4925 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: 
secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.100257 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert podName:dbe9a043-a969-429b-b7b1-33d12296c52c nodeName:}" failed. No retries permitted until 2026-01-21 11:16:11.600225972 +0000 UTC m=+1263.204117916 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert") pod "infra-operator-controller-manager-77c48c7859-dqjpf" (UID: "dbe9a043-a969-429b-b7b1-33d12296c52c") : secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.100619 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.191623 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zqnl\" (UniqueName: \"kubernetes.io/projected/c4be49a0-e872-456f-a102-928f5210524f-kube-api-access-2zqnl\") pod \"horizon-operator-controller-manager-77d5c5b54f-gcxp4\" (UID: \"c4be49a0-e872-456f-a102-928f5210524f\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.198251 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-528nq\" (UniqueName: \"kubernetes.io/projected/2c47ce4c-9012-4798-9bf8-127a96ad285e-kube-api-access-528nq\") pod \"keystone-operator-controller-manager-767fdc4f47-2znsh\" (UID: \"2c47ce4c-9012-4798-9bf8-127a96ad285e\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.203980 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfqv6\" (UniqueName: \"kubernetes.io/projected/e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23-kube-api-access-zfqv6\") pod \"glance-operator-controller-manager-c6994669c-ggpw9\" (UID: \"e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.212720 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-852w5\" (UniqueName: \"kubernetes.io/projected/dbe9a043-a969-429b-b7b1-33d12296c52c-kube-api-access-852w5\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.214821 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k96bx\" (UniqueName: \"kubernetes.io/projected/2d8c2e69-7444-465a-a418-59d9c5b20074-kube-api-access-k96bx\") pod \"ironic-operator-controller-manager-78757b4889-wdwvl\" (UID: \"2d8c2e69-7444-465a-a418-59d9c5b20074\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.215390 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.230685 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.233280 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqd72\" (UniqueName: \"kubernetes.io/projected/50c322c0-a941-48fa-bf86-c2daa64a9aa8-kube-api-access-zqd72\") pod \"designate-operator-controller-manager-9f958b845-mjg4d\" (UID: \"50c322c0-a941-48fa-bf86-c2daa64a9aa8\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.242386 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c97xx\" (UniqueName: \"kubernetes.io/projected/9b9f5cfa-93e1-4940-b7f0-066c6bc4f194-kube-api-access-c97xx\") pod \"heat-operator-controller-manager-594c8c9d5d-h9szq\" (UID: \"9b9f5cfa-93e1-4940-b7f0-066c6bc4f194\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.251655 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdb8c\" (UniqueName: \"kubernetes.io/projected/cf77bf31-5d25-4015-b274-05dbedbedf5a-kube-api-access-zdb8c\") pod \"cinder-operator-controller-manager-9b68f5989-vmg65\" (UID: \"cf77bf31-5d25-4015-b274-05dbedbedf5a\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.266585 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.273706 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.278124 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-kkvq2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.320555 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k96bx\" (UniqueName: \"kubernetes.io/projected/2d8c2e69-7444-465a-a418-59d9c5b20074-kube-api-access-k96bx\") pod \"ironic-operator-controller-manager-78757b4889-wdwvl\" (UID: \"2d8c2e69-7444-465a-a418-59d9c5b20074\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.323123 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.350143 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.351450 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.327851 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.360498 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k96bx\" (UniqueName: \"kubernetes.io/projected/2d8c2e69-7444-465a-a418-59d9c5b20074-kube-api-access-k96bx\") pod \"ironic-operator-controller-manager-78757b4889-wdwvl\" (UID: \"2d8c2e69-7444-465a-a418-59d9c5b20074\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.367607 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-mxg6j" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.367680 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.395934 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.398326 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.414333 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.422533 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.422719 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj8h7\" (UniqueName: \"kubernetes.io/projected/fc8ec38e-f941-4ba0-863e-933e10bf2043-kube-api-access-zj8h7\") pod \"manila-operator-controller-manager-864f6b75bf-x7474\" (UID: \"fc8ec38e-f941-4ba0-863e-933e10bf2043\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.428215 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.431880 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-x8ghr" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.443813 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-nqldj"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.444799 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.446933 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-xfw6h" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.459138 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.462113 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.494526 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-nqldj"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.529074 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zj8h7\" (UniqueName: \"kubernetes.io/projected/fc8ec38e-f941-4ba0-863e-933e10bf2043-kube-api-access-zj8h7\") pod \"manila-operator-controller-manager-864f6b75bf-x7474\" (UID: \"fc8ec38e-f941-4ba0-863e-933e10bf2043\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.530823 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtccn\" (UniqueName: \"kubernetes.io/projected/a7dd34dc-8a69-4c91-88ec-d1d7beffb15d-kube-api-access-wtccn\") pod \"nova-operator-controller-manager-65849867d6-nqldj\" (UID: \"a7dd34dc-8a69-4c91-88ec-d1d7beffb15d\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.531000 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h6tf\" (UniqueName: \"kubernetes.io/projected/6d27cfd1-683a-4e92-bcaf-40f1f370cd1b-kube-api-access-2h6tf\") pod \"mariadb-operator-controller-manager-c87fff755-b4cd2\" (UID: \"6d27cfd1-683a-4e92-bcaf-40f1f370cd1b\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.543721 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.545016 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.549614 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-c4gkl" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.549876 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.551151 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.555355 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.556570 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.559489 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.559831 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-6qpfp" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.560072 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-mdkkk" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.564689 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.571101 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.580690 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-84spn"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.581679 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.587870 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-jrqfm" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.602858 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.603343 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zj8h7\" (UniqueName: \"kubernetes.io/projected/fc8ec38e-f941-4ba0-863e-933e10bf2043-kube-api-access-zj8h7\") pod \"manila-operator-controller-manager-864f6b75bf-x7474\" (UID: \"fc8ec38e-f941-4ba0-863e-933e10bf2043\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.604314 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.608145 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-84spn"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.621602 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.622997 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.629734 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-f79vp" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634556 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634610 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp9bp\" (UniqueName: \"kubernetes.io/projected/a032309d-2543-4e6b-8207-d8097dffcaf5-kube-api-access-tp9bp\") pod \"ovn-operator-controller-manager-55db956ddc-l9f98\" (UID: \"a032309d-2543-4e6b-8207-d8097dffcaf5\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634670 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4952\" (UniqueName: \"kubernetes.io/projected/398ea514-c4f3-40db-8421-ebf007fda30d-kube-api-access-d4952\") pod \"neutron-operator-controller-manager-cb4666565-t9fng\" (UID: \"398ea514-c4f3-40db-8421-ebf007fda30d\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634720 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kjnc\" (UniqueName: \"kubernetes.io/projected/0fb89ff9-2ba9-4a38-b739-43fa22a5b209-kube-api-access-5kjnc\") pod \"placement-operator-controller-manager-686df47fcb-84spn\" (UID: \"0fb89ff9-2ba9-4a38-b739-43fa22a5b209\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634774 4925 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-wtccn\" (UniqueName: \"kubernetes.io/projected/a7dd34dc-8a69-4c91-88ec-d1d7beffb15d-kube-api-access-wtccn\") pod \"nova-operator-controller-manager-65849867d6-nqldj\" (UID: \"a7dd34dc-8a69-4c91-88ec-d1d7beffb15d\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634811 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwkmg\" (UniqueName: \"kubernetes.io/projected/cc5d8922-f54d-42a1-b23a-622329e3f644-kube-api-access-qwkmg\") pod \"octavia-operator-controller-manager-7fc9b76cf6-44xwf\" (UID: \"cc5d8922-f54d-42a1-b23a-622329e3f644\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634849 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cvx2\" (UniqueName: \"kubernetes.io/projected/a9c52af6-912a-4e93-bbcd-42e961453471-kube-api-access-7cvx2\") pod \"swift-operator-controller-manager-85dd56d4cc-cq4k9\" (UID: \"a9c52af6-912a-4e93-bbcd-42e961453471\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634880 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634915 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mslp7\" (UniqueName: \"kubernetes.io/projected/05db7c08-87f6-4518-8d61-c87cbf0b1735-kube-api-access-mslp7\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.634950 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h6tf\" (UniqueName: \"kubernetes.io/projected/6d27cfd1-683a-4e92-bcaf-40f1f370cd1b-kube-api-access-2h6tf\") pod \"mariadb-operator-controller-manager-c87fff755-b4cd2\" (UID: \"6d27cfd1-683a-4e92-bcaf-40f1f370cd1b\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.635434 4925 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.635573 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert podName:dbe9a043-a969-429b-b7b1-33d12296c52c nodeName:}" failed. No retries permitted until 2026-01-21 11:16:12.635539355 +0000 UTC m=+1264.239431329 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert") pod "infra-operator-controller-manager-77c48c7859-dqjpf" (UID: "dbe9a043-a969-429b-b7b1-33d12296c52c") : secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.635637 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.637358 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.639029 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-44np6" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.646130 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.649786 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.653334 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.654483 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.656204 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h6tf\" (UniqueName: \"kubernetes.io/projected/6d27cfd1-683a-4e92-bcaf-40f1f370cd1b-kube-api-access-2h6tf\") pod \"mariadb-operator-controller-manager-c87fff755-b4cd2\" (UID: \"6d27cfd1-683a-4e92-bcaf-40f1f370cd1b\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.661348 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtccn\" (UniqueName: \"kubernetes.io/projected/a7dd34dc-8a69-4c91-88ec-d1d7beffb15d-kube-api-access-wtccn\") pod \"nova-operator-controller-manager-65849867d6-nqldj\" (UID: \"a7dd34dc-8a69-4c91-88ec-d1d7beffb15d\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.667136 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-lkm5r" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.684885 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.694701 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.696198 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.699769 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-zlv74" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.699832 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.705716 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.715372 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.739467 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mslp7\" (UniqueName: \"kubernetes.io/projected/05db7c08-87f6-4518-8d61-c87cbf0b1735-kube-api-access-mslp7\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.739596 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx8n7\" (UniqueName: \"kubernetes.io/projected/d7429a44-6eeb-419b-8193-29275baf4ad9-kube-api-access-zx8n7\") pod \"telemetry-operator-controller-manager-5f8f495fcf-gcwbr\" (UID: \"d7429a44-6eeb-419b-8193-29275baf4ad9\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.739722 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp9bp\" (UniqueName: \"kubernetes.io/projected/a032309d-2543-4e6b-8207-d8097dffcaf5-kube-api-access-tp9bp\") pod \"ovn-operator-controller-manager-55db956ddc-l9f98\" (UID: \"a032309d-2543-4e6b-8207-d8097dffcaf5\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.739749 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w24j\" (UniqueName: \"kubernetes.io/projected/2775a0a7-d5b2-428f-ab41-9057fed196a2-kube-api-access-2w24j\") pod \"watcher-operator-controller-manager-849fd9b886-62fs2\" (UID: \"2775a0a7-d5b2-428f-ab41-9057fed196a2\") " pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.739801 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4952\" (UniqueName: \"kubernetes.io/projected/398ea514-c4f3-40db-8421-ebf007fda30d-kube-api-access-d4952\") pod \"neutron-operator-controller-manager-cb4666565-t9fng\" (UID: \"398ea514-c4f3-40db-8421-ebf007fda30d\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.739872 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p899\" (UniqueName: 
\"kubernetes.io/projected/fd15c43d-a647-467e-a4f1-eb0ca81a123f-kube-api-access-9p899\") pod \"test-operator-controller-manager-7cd8bc9dbb-hvtnz\" (UID: \"fd15c43d-a647-467e-a4f1-eb0ca81a123f\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.739898 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kjnc\" (UniqueName: \"kubernetes.io/projected/0fb89ff9-2ba9-4a38-b739-43fa22a5b209-kube-api-access-5kjnc\") pod \"placement-operator-controller-manager-686df47fcb-84spn\" (UID: \"0fb89ff9-2ba9-4a38-b739-43fa22a5b209\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.739998 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwkmg\" (UniqueName: \"kubernetes.io/projected/cc5d8922-f54d-42a1-b23a-622329e3f644-kube-api-access-qwkmg\") pod \"octavia-operator-controller-manager-7fc9b76cf6-44xwf\" (UID: \"cc5d8922-f54d-42a1-b23a-622329e3f644\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.740091 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cvx2\" (UniqueName: \"kubernetes.io/projected/a9c52af6-912a-4e93-bbcd-42e961453471-kube-api-access-7cvx2\") pod \"swift-operator-controller-manager-85dd56d4cc-cq4k9\" (UID: \"a9c52af6-912a-4e93-bbcd-42e961453471\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.740119 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.740305 4925 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.740358 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert podName:05db7c08-87f6-4518-8d61-c87cbf0b1735 nodeName:}" failed. No retries permitted until 2026-01-21 11:16:12.240343193 +0000 UTC m=+1263.844235117 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" (UID: "05db7c08-87f6-4518-8d61-c87cbf0b1735") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.778996 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kjnc\" (UniqueName: \"kubernetes.io/projected/0fb89ff9-2ba9-4a38-b739-43fa22a5b209-kube-api-access-5kjnc\") pod \"placement-operator-controller-manager-686df47fcb-84spn\" (UID: \"0fb89ff9-2ba9-4a38-b739-43fa22a5b209\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.779861 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mslp7\" (UniqueName: \"kubernetes.io/projected/05db7c08-87f6-4518-8d61-c87cbf0b1735-kube-api-access-mslp7\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.782155 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp9bp\" (UniqueName: \"kubernetes.io/projected/a032309d-2543-4e6b-8207-d8097dffcaf5-kube-api-access-tp9bp\") pod \"ovn-operator-controller-manager-55db956ddc-l9f98\" (UID: \"a032309d-2543-4e6b-8207-d8097dffcaf5\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.785905 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.785922 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4952\" (UniqueName: \"kubernetes.io/projected/398ea514-c4f3-40db-8421-ebf007fda30d-kube-api-access-d4952\") pod \"neutron-operator-controller-manager-cb4666565-t9fng\" (UID: \"398ea514-c4f3-40db-8421-ebf007fda30d\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.791680 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwkmg\" (UniqueName: \"kubernetes.io/projected/cc5d8922-f54d-42a1-b23a-622329e3f644-kube-api-access-qwkmg\") pod \"octavia-operator-controller-manager-7fc9b76cf6-44xwf\" (UID: \"cc5d8922-f54d-42a1-b23a-622329e3f644\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.792377 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cvx2\" (UniqueName: \"kubernetes.io/projected/a9c52af6-912a-4e93-bbcd-42e961453471-kube-api-access-7cvx2\") pod \"swift-operator-controller-manager-85dd56d4cc-cq4k9\" (UID: \"a9c52af6-912a-4e93-bbcd-42e961453471\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.792775 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.794111 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.795772 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.798351 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-8pv4r" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.806906 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.821200 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.824987 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.859802 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.867165 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.867321 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx8n7\" (UniqueName: \"kubernetes.io/projected/d7429a44-6eeb-419b-8193-29275baf4ad9-kube-api-access-zx8n7\") pod \"telemetry-operator-controller-manager-5f8f495fcf-gcwbr\" (UID: \"d7429a44-6eeb-419b-8193-29275baf4ad9\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.867365 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.867720 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w24j\" (UniqueName: \"kubernetes.io/projected/2775a0a7-d5b2-428f-ab41-9057fed196a2-kube-api-access-2w24j\") pod \"watcher-operator-controller-manager-849fd9b886-62fs2\" (UID: \"2775a0a7-d5b2-428f-ab41-9057fed196a2\") " pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.867833 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p899\" (UniqueName: \"kubernetes.io/projected/fd15c43d-a647-467e-a4f1-eb0ca81a123f-kube-api-access-9p899\") pod \"test-operator-controller-manager-7cd8bc9dbb-hvtnz\" (UID: \"fd15c43d-a647-467e-a4f1-eb0ca81a123f\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.867995 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86q9l\" (UniqueName: \"kubernetes.io/projected/be80c7ef-4f5f-4660-9954-5ab5b34655cf-kube-api-access-86q9l\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.870370 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.885093 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-l72gz" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.887702 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f"] Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.914660 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2w24j\" (UniqueName: \"kubernetes.io/projected/2775a0a7-d5b2-428f-ab41-9057fed196a2-kube-api-access-2w24j\") pod \"watcher-operator-controller-manager-849fd9b886-62fs2\" (UID: \"2775a0a7-d5b2-428f-ab41-9057fed196a2\") " pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.916159 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p899\" (UniqueName: \"kubernetes.io/projected/fd15c43d-a647-467e-a4f1-eb0ca81a123f-kube-api-access-9p899\") pod \"test-operator-controller-manager-7cd8bc9dbb-hvtnz\" (UID: \"fd15c43d-a647-467e-a4f1-eb0ca81a123f\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.918431 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.924353 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx8n7\" (UniqueName: \"kubernetes.io/projected/d7429a44-6eeb-419b-8193-29275baf4ad9-kube-api-access-zx8n7\") pod \"telemetry-operator-controller-manager-5f8f495fcf-gcwbr\" (UID: \"d7429a44-6eeb-419b-8193-29275baf4ad9\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.975454 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86q9l\" (UniqueName: \"kubernetes.io/projected/be80c7ef-4f5f-4660-9954-5ab5b34655cf-kube-api-access-86q9l\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.976008 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.976088 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.976236 4925 secret.go:188] Couldn't get secret 
openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.976356 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:12.476329928 +0000 UTC m=+1264.080221922 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "webhook-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.976379 4925 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: E0121 11:16:11.976546 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:12.476457091 +0000 UTC m=+1264.080349095 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "metrics-server-cert" not found Jan 21 11:16:11 crc kubenswrapper[4925]: I0121 11:16:11.997259 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86q9l\" (UniqueName: \"kubernetes.io/projected/be80c7ef-4f5f-4660-9954-5ab5b34655cf-kube-api-access-86q9l\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.185034 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g9fm\" (UniqueName: \"kubernetes.io/projected/182d9a34-f024-4a86-8851-9e20d654f4ac-kube-api-access-4g9fm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-k7r2f\" (UID: \"182d9a34-f024-4a86-8851-9e20d654f4ac\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.186094 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.186517 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.186746 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.201977 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.291562 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g9fm\" (UniqueName: \"kubernetes.io/projected/182d9a34-f024-4a86-8851-9e20d654f4ac-kube-api-access-4g9fm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-k7r2f\" (UID: \"182d9a34-f024-4a86-8851-9e20d654f4ac\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.291625 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:12 crc kubenswrapper[4925]: E0121 11:16:12.292319 4925 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:12 crc kubenswrapper[4925]: E0121 11:16:12.292406 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert podName:05db7c08-87f6-4518-8d61-c87cbf0b1735 nodeName:}" failed. No retries permitted until 2026-01-21 11:16:13.292367688 +0000 UTC m=+1264.896259622 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" (UID: "05db7c08-87f6-4518-8d61-c87cbf0b1735") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.324460 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g9fm\" (UniqueName: \"kubernetes.io/projected/182d9a34-f024-4a86-8851-9e20d654f4ac-kube-api-access-4g9fm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-k7r2f\" (UID: \"182d9a34-f024-4a86-8851-9e20d654f4ac\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.441448 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.454577 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.464928 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4"] Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.491673 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.494743 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.494824 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:12 crc kubenswrapper[4925]: E0121 11:16:12.496362 4925 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 21 11:16:12 crc kubenswrapper[4925]: E0121 11:16:12.496445 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:13.496426586 +0000 UTC m=+1265.100318520 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "webhook-server-cert" not found Jan 21 11:16:12 crc kubenswrapper[4925]: E0121 11:16:12.496905 4925 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 21 11:16:12 crc kubenswrapper[4925]: E0121 11:16:12.496947 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:13.496937671 +0000 UTC m=+1265.100829605 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "metrics-server-cert" not found Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.500520 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh"] Jan 21 11:16:12 crc kubenswrapper[4925]: W0121 11:16:12.526531 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4be49a0_e872_456f_a102_928f5210524f.slice/crio-7f30467a2fcfe268c6bfbca58ec7e255f9f83e7b68bcb823c66a9d8f52b90e78 WatchSource:0}: Error finding container 7f30467a2fcfe268c6bfbca58ec7e255f9f83e7b68bcb823c66a9d8f52b90e78: Status 404 returned error can't find the container with id 7f30467a2fcfe268c6bfbca58ec7e255f9f83e7b68bcb823c66a9d8f52b90e78 Jan 21 11:16:12 crc kubenswrapper[4925]: W0121 11:16:12.556288 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8031329_a6ad_49da_881e_94db9f545ab7.slice/crio-d81f266e9391d835ae29389a211a25bc3a66b756933bbd3511ebd7e7bd6b2df6 WatchSource:0}: Error finding container d81f266e9391d835ae29389a211a25bc3a66b756933bbd3511ebd7e7bd6b2df6: Status 404 returned error can't find the container with id d81f266e9391d835ae29389a211a25bc3a66b756933bbd3511ebd7e7bd6b2df6 Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.698867 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:12 crc kubenswrapper[4925]: E0121 11:16:12.701637 4925 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:12 crc kubenswrapper[4925]: E0121 11:16:12.701714 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert podName:dbe9a043-a969-429b-b7b1-33d12296c52c nodeName:}" failed. No retries permitted until 2026-01-21 11:16:14.701688081 +0000 UTC m=+1266.305580085 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert") pod "infra-operator-controller-manager-77c48c7859-dqjpf" (UID: "dbe9a043-a969-429b-b7b1-33d12296c52c") : secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:12 crc kubenswrapper[4925]: W0121 11:16:12.860061 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d8c2e69_7444_465a_a418_59d9c5b20074.slice/crio-a4478fb4703e5a0c48bbe3dd122af77ad874b520d3343f3f214f6bcbd005316e WatchSource:0}: Error finding container a4478fb4703e5a0c48bbe3dd122af77ad874b520d3343f3f214f6bcbd005316e: Status 404 returned error can't find the container with id a4478fb4703e5a0c48bbe3dd122af77ad874b520d3343f3f214f6bcbd005316e Jan 21 11:16:12 crc kubenswrapper[4925]: W0121 11:16:12.860480 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf77bf31_5d25_4015_b274_05dbedbedf5a.slice/crio-1866750785ff2c3f99b949c282b004030a7c207d2063888ec48484fc9eb49918 WatchSource:0}: Error finding container 1866750785ff2c3f99b949c282b004030a7c207d2063888ec48484fc9eb49918: Status 404 returned error can't find the container with id 1866750785ff2c3f99b949c282b004030a7c207d2063888ec48484fc9eb49918 Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.862231 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh"] Jan 21 11:16:12 crc kubenswrapper[4925]: W0121 11:16:12.863781 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c47ce4c_9012_4798_9bf8_127a96ad285e.slice/crio-e5bf53976662dc95257b715a33ca5f3f58ca778a1f5440bdab2102fd92affe35 WatchSource:0}: Error finding container e5bf53976662dc95257b715a33ca5f3f58ca778a1f5440bdab2102fd92affe35: Status 404 returned error can't find the container with id e5bf53976662dc95257b715a33ca5f3f58ca778a1f5440bdab2102fd92affe35 Jan 21 11:16:12 crc kubenswrapper[4925]: W0121 11:16:12.866375 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50c322c0_a941_48fa_bf86_c2daa64a9aa8.slice/crio-dbea741a5acec0ce2df2f16bb77257b8321337474e6a60a2ae5fab607b6ee4ba WatchSource:0}: Error finding container dbea741a5acec0ce2df2f16bb77257b8321337474e6a60a2ae5fab607b6ee4ba: Status 404 returned error can't find the container with id dbea741a5acec0ce2df2f16bb77257b8321337474e6a60a2ae5fab607b6ee4ba Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.868301 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl"] Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.876647 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65"] Jan 21 11:16:12 crc kubenswrapper[4925]: I0121 11:16:12.883163 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.205439 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.221696 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.337529 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.338156 4925 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.338214 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert podName:05db7c08-87f6-4518-8d61-c87cbf0b1735 nodeName:}" failed. No retries permitted until 2026-01-21 11:16:15.338196863 +0000 UTC m=+1266.942088797 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" (UID: "05db7c08-87f6-4518-8d61-c87cbf0b1735") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.344588 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.348516 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.365171 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-84spn"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.371317 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.383416 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.405104 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" event={"ID":"50c322c0-a941-48fa-bf86-c2daa64a9aa8","Type":"ContainerStarted","Data":"dbea741a5acec0ce2df2f16bb77257b8321337474e6a60a2ae5fab607b6ee4ba"} Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.412373 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng"] Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.416349 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" event={"ID":"e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23","Type":"ContainerStarted","Data":"94b1698a211fa4ec5f472b7ebb0fead85af19f64b208185aa7c89a1ae914a071"} Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.422140 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474"] Jan 21 11:16:13 crc 
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.422406 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" event={"ID":"9b9f5cfa-93e1-4940-b7f0-066c6bc4f194","Type":"ContainerStarted","Data":"6ce244b23922747873123a17538fa9cdd59e2e8a6a9c1e801acc577bcb369a4c"}
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.430607 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" event={"ID":"2d8c2e69-7444-465a-a418-59d9c5b20074","Type":"ContainerStarted","Data":"a4478fb4703e5a0c48bbe3dd122af77ad874b520d3343f3f214f6bcbd005316e"}
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.433931 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr"]
Jan 21 11:16:13 crc kubenswrapper[4925]: W0121 11:16:13.435115 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd15c43d_a647_467e_a4f1_eb0ca81a123f.slice/crio-428407e9c981c97d7825b128295a37bbbde58e973f302d7d9231afe87e296134 WatchSource:0}: Error finding container 428407e9c981c97d7825b128295a37bbbde58e973f302d7d9231afe87e296134: Status 404 returned error can't find the container with id 428407e9c981c97d7825b128295a37bbbde58e973f302d7d9231afe87e296134
Jan 21 11:16:13 crc kubenswrapper[4925]: W0121 11:16:13.435476 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7429a44_6eeb_419b_8193_29275baf4ad9.slice/crio-0a2eeae6e3e0d4db6852b83e00af92f714cf1c91d779d8ee168904aa505e76e3 WatchSource:0}: Error finding container 0a2eeae6e3e0d4db6852b83e00af92f714cf1c91d779d8ee168904aa505e76e3: Status 404 returned error can't find the container with id 0a2eeae6e3e0d4db6852b83e00af92f714cf1c91d779d8ee168904aa505e76e3
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.440938 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz"]
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.442663 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9p899,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-7cd8bc9dbb-hvtnz_openstack-operators(fd15c43d-a647-467e-a4f1-eb0ca81a123f): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.443799 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" podUID="fd15c43d-a647-467e-a4f1-eb0ca81a123f"
Jan 21 11:16:13 crc kubenswrapper[4925]: W0121 11:16:13.445993 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda7dd34dc_8a69_4c91_88ec_d1d7beffb15d.slice/crio-a4439e4960d44471dcfa3ce55bec977d172f10aac93889906a6b0e876619b81c WatchSource:0}: Error finding container a4439e4960d44471dcfa3ce55bec977d172f10aac93889906a6b0e876619b81c: Status 404 returned error can't find the container with id a4439e4960d44471dcfa3ce55bec977d172f10aac93889906a6b0e876619b81c
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.447638 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" event={"ID":"cf77bf31-5d25-4015-b274-05dbedbedf5a","Type":"ContainerStarted","Data":"1866750785ff2c3f99b949c282b004030a7c207d2063888ec48484fc9eb49918"}
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.448691 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7cvx2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-85dd56d4cc-cq4k9_openstack-operators(a9c52af6-912a-4e93-bbcd-42e961453471): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.448824 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zx8n7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-5f8f495fcf-gcwbr_openstack-operators(d7429a44-6eeb-419b-8193-29275baf4ad9): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.448892 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qwkmg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7fc9b76cf6-44xwf_openstack-operators(cc5d8922-f54d-42a1-b23a-622329e3f644): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.450262 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" podUID="d7429a44-6eeb-419b-8193-29275baf4ad9"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.450312 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" podUID="a9c52af6-912a-4e93-bbcd-42e961453471"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.450345 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" podUID="cc5d8922-f54d-42a1-b23a-622329e3f644"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.453146 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wtccn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-65849867d6-nqldj_openstack-operators(a7dd34dc-8a69-4c91-88ec-d1d7beffb15d): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
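Every one of these kuberuntime_manager.go dumps ends the same way: ErrImagePull: pull QPS exceeded. That error comes from the kubelet's own client-side throttle on registry traffic (registryPullQPS/registryBurst in the kubelet configuration, documented defaults 5 and 10), so it is raised immediately, without contacting the registry, when many pulls are requested at once, as with the dozen operator images starting in the same second here. A sketch of the token-bucket idea with client-go's flowcontrol package; the limiter values are the documented defaults, not read from this node, and the loop is an illustration rather than the kubelet's actual pull path:

    package main

    import (
    	"fmt"

    	"k8s.io/client-go/util/flowcontrol"
    )

    func main() {
    	// Kubelet defaults: registryPullQPS=5, registryBurst=10.
    	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)

    	// ~20 operator images requested back to back, as at 11:16:13 above:
    	// the first 10 drain the burst bucket, later ones are rejected.
    	for i := 1; i <= 20; i++ {
    		if !limiter.TryAccept() {
    			// The branch that surfaces as "ErrImagePull: pull QPS exceeded".
    			fmt.Printf("pull %2d: pull QPS exceeded\n", i)
    			continue
    		}
    		fmt.Printf("pull %2d: admitted\n", i)
    	}
    }

A pull rejected this way is retried on the image back-off schedule, which is why the same pods report ImagePullBackOff one second later.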
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.454178 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" event={"ID":"c4be49a0-e872-456f-a102-928f5210524f","Type":"ContainerStarted","Data":"7f30467a2fcfe268c6bfbca58ec7e255f9f83e7b68bcb823c66a9d8f52b90e78"}
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.454572 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" podUID="a7dd34dc-8a69-4c91-88ec-d1d7beffb15d"
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.455669 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" event={"ID":"2c47ce4c-9012-4798-9bf8-127a96ad285e","Type":"ContainerStarted","Data":"e5bf53976662dc95257b715a33ca5f3f58ca778a1f5440bdab2102fd92affe35"}
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.457749 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" event={"ID":"d8031329-a6ad-49da-881e-94db9f545ab7","Type":"ContainerStarted","Data":"d81f266e9391d835ae29389a211a25bc3a66b756933bbd3511ebd7e7bd6b2df6"}
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.463814 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-nqldj"]
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.475294 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9"]
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.480128 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4g9fm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-k7r2f_openstack-operators(182d9a34-f024-4a86-8851-9e20d654f4ac): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.482075 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" podUID="182d9a34-f024-4a86-8851-9e20d654f4ac"
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.483916 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f"]
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.545686 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28"
Jan 21 11:16:13 crc kubenswrapper[4925]: I0121 11:16:13.546136 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28"
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.545887 4925 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.546216 4925 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.546425 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:15.546389166 +0000 UTC m=+1267.150281100 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "webhook-server-cert" not found
Jan 21 11:16:13 crc kubenswrapper[4925]: E0121 11:16:13.546529 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:15.546501799 +0000 UTC m=+1267.150393723 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "metrics-server-cert" not found
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.466725 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" event={"ID":"a7dd34dc-8a69-4c91-88ec-d1d7beffb15d","Type":"ContainerStarted","Data":"a4439e4960d44471dcfa3ce55bec977d172f10aac93889906a6b0e876619b81c"}
Jan 21 11:16:14 crc kubenswrapper[4925]: E0121 11:16:14.469160 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231\\\"\"" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" podUID="a7dd34dc-8a69-4c91-88ec-d1d7beffb15d"
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.469987 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" event={"ID":"a9c52af6-912a-4e93-bbcd-42e961453471","Type":"ContainerStarted","Data":"71a1dbe8e43f048d2f2a0e363661def5e0199abf94a06ae7d7fccce5c7efa7d9"}
Jan 21 11:16:14 crc kubenswrapper[4925]: E0121 11:16:14.471566 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92\\\"\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" podUID="a9c52af6-912a-4e93-bbcd-42e961453471"
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.473026 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" event={"ID":"398ea514-c4f3-40db-8421-ebf007fda30d","Type":"ContainerStarted","Data":"957b77dd47dc7ca97c48b2ad8de0beec8b8d8d281e4fe5d0a5a01697439fb412"}
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.474444 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" event={"ID":"6d27cfd1-683a-4e92-bcaf-40f1f370cd1b","Type":"ContainerStarted","Data":"b9a257529a8cceff211cf3b646d01032d5249756e2a287ba7c128b1a1dfe032d"}
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.480417 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" event={"ID":"182d9a34-f024-4a86-8851-9e20d654f4ac","Type":"ContainerStarted","Data":"0fd235b1bcfb9fb16c95a97ee23a1941f00daf3732b979b9f227a8b6463bdbb2"}
Jan 21 11:16:14 crc kubenswrapper[4925]: E0121 11:16:14.490752 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" podUID="182d9a34-f024-4a86-8851-9e20d654f4ac"
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.491726 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" event={"ID":"fc8ec38e-f941-4ba0-863e-933e10bf2043","Type":"ContainerStarted","Data":"eb0fe6e33cfd92e6c6a1156cc6b5ba7ef68ad18cc9d46c4974c3f882f0e13cc1"}
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.493373 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" event={"ID":"fd15c43d-a647-467e-a4f1-eb0ca81a123f","Type":"ContainerStarted","Data":"428407e9c981c97d7825b128295a37bbbde58e973f302d7d9231afe87e296134"}
Jan 21 11:16:14 crc kubenswrapper[4925]: E0121 11:16:14.496815 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e\\\"\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" podUID="fd15c43d-a647-467e-a4f1-eb0ca81a123f"
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.500283 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" event={"ID":"d7429a44-6eeb-419b-8193-29275baf4ad9","Type":"ContainerStarted","Data":"0a2eeae6e3e0d4db6852b83e00af92f714cf1c91d779d8ee168904aa505e76e3"}
Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.505918 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" event={"ID":"cc5d8922-f54d-42a1-b23a-622329e3f644","Type":"ContainerStarted","Data":"f85cd2f6b0111cae516717913c0818e808d5a45b9dbc403b21610fbb916359d7"}
Jan 21 11:16:14 crc kubenswrapper[4925]: E0121 11:16:14.506802 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" podUID="d7429a44-6eeb-419b-8193-29275baf4ad9"
Jan 21 11:16:14 crc kubenswrapper[4925]: E0121 11:16:14.507734 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" podUID="cc5d8922-f54d-42a1-b23a-622329e3f644"
event for pod" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" event={"ID":"2775a0a7-d5b2-428f-ab41-9057fed196a2","Type":"ContainerStarted","Data":"7e70f18d27b11acd98e18b2e73fae0edecb5738b0d2567915fd315ad420d01a9"} Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.510709 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" event={"ID":"a032309d-2543-4e6b-8207-d8097dffcaf5","Type":"ContainerStarted","Data":"857ea57d1f252e00ae7e41a61785fba7036d61cb8c384090872359f5bf92bc3f"} Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.512056 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" event={"ID":"0fb89ff9-2ba9-4a38-b739-43fa22a5b209","Type":"ContainerStarted","Data":"598b1020e2be335d6baab7a3af295400a28a2621cb09f60374e7a625c7ba2ba1"} Jan 21 11:16:14 crc kubenswrapper[4925]: I0121 11:16:14.785135 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:14 crc kubenswrapper[4925]: E0121 11:16:14.785332 4925 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:14 crc kubenswrapper[4925]: E0121 11:16:14.785744 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert podName:dbe9a043-a969-429b-b7b1-33d12296c52c nodeName:}" failed. No retries permitted until 2026-01-21 11:16:18.785720694 +0000 UTC m=+1270.389612638 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert") pod "infra-operator-controller-manager-77c48c7859-dqjpf" (UID: "dbe9a043-a969-429b-b7b1-33d12296c52c") : secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:15 crc kubenswrapper[4925]: I0121 11:16:15.415580 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.416014 4925 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.416087 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert podName:05db7c08-87f6-4518-8d61-c87cbf0b1735 nodeName:}" failed. No retries permitted until 2026-01-21 11:16:19.416064693 +0000 UTC m=+1271.019956627 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" (UID: "05db7c08-87f6-4518-8d61-c87cbf0b1735") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.530742 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e\\\"\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" podUID="fd15c43d-a647-467e-a4f1-eb0ca81a123f" Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.530776 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" podUID="d7429a44-6eeb-419b-8193-29275baf4ad9" Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.530788 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" podUID="182d9a34-f024-4a86-8851-9e20d654f4ac" Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.530887 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231\\\"\"" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" podUID="a7dd34dc-8a69-4c91-88ec-d1d7beffb15d" Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.531010 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" podUID="cc5d8922-f54d-42a1-b23a-622329e3f644" Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.545414 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92\\\"\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" podUID="a9c52af6-912a-4e93-bbcd-42e961453471" Jan 21 11:16:15 crc kubenswrapper[4925]: I0121 11:16:15.619094 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " 
pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:15 crc kubenswrapper[4925]: I0121 11:16:15.619185 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.619336 4925 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.619352 4925 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.619474 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:19.61944183 +0000 UTC m=+1271.223333834 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "webhook-server-cert" not found Jan 21 11:16:15 crc kubenswrapper[4925]: E0121 11:16:15.619495 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:19.619487562 +0000 UTC m=+1271.223379606 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "metrics-server-cert" not found Jan 21 11:16:18 crc kubenswrapper[4925]: I0121 11:16:18.862222 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:18 crc kubenswrapper[4925]: E0121 11:16:18.862405 4925 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:18 crc kubenswrapper[4925]: E0121 11:16:18.862743 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert podName:dbe9a043-a969-429b-b7b1-33d12296c52c nodeName:}" failed. No retries permitted until 2026-01-21 11:16:26.862723056 +0000 UTC m=+1278.466614990 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert") pod "infra-operator-controller-manager-77c48c7859-dqjpf" (UID: "dbe9a043-a969-429b-b7b1-33d12296c52c") : secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:19 crc kubenswrapper[4925]: I0121 11:16:19.480987 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:19 crc kubenswrapper[4925]: E0121 11:16:19.481474 4925 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:19 crc kubenswrapper[4925]: E0121 11:16:19.481542 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert podName:05db7c08-87f6-4518-8d61-c87cbf0b1735 nodeName:}" failed. No retries permitted until 2026-01-21 11:16:27.48152641 +0000 UTC m=+1279.085418334 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" (UID: "05db7c08-87f6-4518-8d61-c87cbf0b1735") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:19 crc kubenswrapper[4925]: I0121 11:16:19.685798 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:19 crc kubenswrapper[4925]: I0121 11:16:19.685950 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:19 crc kubenswrapper[4925]: E0121 11:16:19.686022 4925 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 21 11:16:19 crc kubenswrapper[4925]: E0121 11:16:19.686108 4925 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 21 11:16:19 crc kubenswrapper[4925]: E0121 11:16:19.686141 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:27.686114364 +0000 UTC m=+1279.290006298 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "metrics-server-cert" not found Jan 21 11:16:19 crc kubenswrapper[4925]: E0121 11:16:19.686242 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:27.686227767 +0000 UTC m=+1279.290119701 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "webhook-server-cert" not found Jan 21 11:16:19 crc kubenswrapper[4925]: I0121 11:16:19.941690 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:16:19 crc kubenswrapper[4925]: I0121 11:16:19.941786 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.637984 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" event={"ID":"d8031329-a6ad-49da-881e-94db9f545ab7","Type":"ContainerStarted","Data":"01b2cc14516075d600e4e57ee4ad21f437afde4346da7185f999307e97481954"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.639494 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.640956 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" event={"ID":"50c322c0-a941-48fa-bf86-c2daa64a9aa8","Type":"ContainerStarted","Data":"c7343f3777a5e9ab7c32d10ce51ccb2109438ee031917ac02de523d013ff00de"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.641483 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.643019 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" event={"ID":"398ea514-c4f3-40db-8421-ebf007fda30d","Type":"ContainerStarted","Data":"be67e0e5a1871281de9884d9251b85297abc9d77a597056cb19e985002e7589a"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.643587 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.644995 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" event={"ID":"2c47ce4c-9012-4798-9bf8-127a96ad285e","Type":"ContainerStarted","Data":"0792bf7d129bbc389c4c8256d4f5773052c8c64e9b3e55066390c59e33e80f2c"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.645764 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.647356 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" event={"ID":"a032309d-2543-4e6b-8207-d8097dffcaf5","Type":"ContainerStarted","Data":"3e7c2b51e9ba963655c4bed6bb32d1a2df9e94e4970d122312450835b17fb14e"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.647918 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.649186 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" event={"ID":"fc8ec38e-f941-4ba0-863e-933e10bf2043","Type":"ContainerStarted","Data":"17d8195590254ce8be166ba1da6d0d403f74ef8860a2e2de6a9346c772adc56a"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.649591 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.650747 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" event={"ID":"0fb89ff9-2ba9-4a38-b739-43fa22a5b209","Type":"ContainerStarted","Data":"d44af2a69a106da67b9c6ecdbb92a0272dad939fbba24b765317189131804a41"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.651111 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.652651 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" event={"ID":"2775a0a7-d5b2-428f-ab41-9057fed196a2","Type":"ContainerStarted","Data":"9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.653079 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.654199 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" event={"ID":"9b9f5cfa-93e1-4940-b7f0-066c6bc4f194","Type":"ContainerStarted","Data":"7128a15f84409c9c5872d73bf40d881f2b07fa10e9108c17fcd63334986e6401"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.654728 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.657491 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" 
event={"ID":"cf77bf31-5d25-4015-b274-05dbedbedf5a","Type":"ContainerStarted","Data":"162ef5ed0bc3e703c4f624b50d21204710430fb256768337dff4d765770ceb61"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.657640 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.658972 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" event={"ID":"c4be49a0-e872-456f-a102-928f5210524f","Type":"ContainerStarted","Data":"f01f8346908ae53366aeae037249b442aa3921a69fe2268677d7c694a4b2c7cb"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.659080 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.660428 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" event={"ID":"e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23","Type":"ContainerStarted","Data":"4f81c284b89e0ac7e3d089a525136bd5e714ba7ff059901e52a36912862f4674"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.660888 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.662111 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" event={"ID":"6d27cfd1-683a-4e92-bcaf-40f1f370cd1b","Type":"ContainerStarted","Data":"5521dd89e68be1d2ff4ea5e563af469b82a2fe8f0eaa32825df2b18d0ae4b807"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.662573 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.663768 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" event={"ID":"2d8c2e69-7444-465a-a418-59d9c5b20074","Type":"ContainerStarted","Data":"a9949d1accf4bcdcce0eb6eca8e2c2d0e3788c9db00120628a81b24261d9547a"} Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.664185 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.675869 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" podStartSLOduration=3.710324713 podStartE2EDuration="16.675848726s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:12.57204492 +0000 UTC m=+1264.175936854" lastFinishedPulling="2026-01-21 11:16:25.537568933 +0000 UTC m=+1277.141460867" observedRunningTime="2026-01-21 11:16:26.669148655 +0000 UTC m=+1278.273040589" watchObservedRunningTime="2026-01-21 11:16:26.675848726 +0000 UTC m=+1278.279740660" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.710853 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" podStartSLOduration=4.50797452 podStartE2EDuration="16.710834403s" 
podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.399639563 +0000 UTC m=+1265.003531507" lastFinishedPulling="2026-01-21 11:16:25.602499456 +0000 UTC m=+1277.206391390" observedRunningTime="2026-01-21 11:16:26.706262376 +0000 UTC m=+1278.310154310" watchObservedRunningTime="2026-01-21 11:16:26.710834403 +0000 UTC m=+1278.314726337" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.800725 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" podStartSLOduration=4.630060204 podStartE2EDuration="16.800702913s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.407099146 +0000 UTC m=+1265.010991070" lastFinishedPulling="2026-01-21 11:16:25.577741845 +0000 UTC m=+1277.181633779" observedRunningTime="2026-01-21 11:16:26.796305992 +0000 UTC m=+1278.400197926" watchObservedRunningTime="2026-01-21 11:16:26.800702913 +0000 UTC m=+1278.404594847" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.859596 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" podStartSLOduration=4.145994345 podStartE2EDuration="16.859572905s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:12.863987439 +0000 UTC m=+1264.467879363" lastFinishedPulling="2026-01-21 11:16:25.577565989 +0000 UTC m=+1277.181457923" observedRunningTime="2026-01-21 11:16:26.848916616 +0000 UTC m=+1278.452808540" watchObservedRunningTime="2026-01-21 11:16:26.859572905 +0000 UTC m=+1278.463464839" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.903917 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:26 crc kubenswrapper[4925]: E0121 11:16:26.904157 4925 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:26 crc kubenswrapper[4925]: E0121 11:16:26.904217 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert podName:dbe9a043-a969-429b-b7b1-33d12296c52c nodeName:}" failed. No retries permitted until 2026-01-21 11:16:42.904199461 +0000 UTC m=+1294.508091395 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert") pod "infra-operator-controller-manager-77c48c7859-dqjpf" (UID: "dbe9a043-a969-429b-b7b1-33d12296c52c") : secret "infra-operator-webhook-server-cert" not found Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.957845 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" podStartSLOduration=3.81935541 podStartE2EDuration="15.957823636s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.409825998 +0000 UTC m=+1265.013717932" lastFinishedPulling="2026-01-21 11:16:25.548294224 +0000 UTC m=+1277.152186158" observedRunningTime="2026-01-21 11:16:26.955701633 +0000 UTC m=+1278.559593567" watchObservedRunningTime="2026-01-21 11:16:26.957823636 +0000 UTC m=+1278.561715560" Jan 21 11:16:26 crc kubenswrapper[4925]: I0121 11:16:26.986587 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" podStartSLOduration=4.243297026 podStartE2EDuration="16.986523396s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:12.864364609 +0000 UTC m=+1264.468256543" lastFinishedPulling="2026-01-21 11:16:25.607590979 +0000 UTC m=+1277.211482913" observedRunningTime="2026-01-21 11:16:26.910555051 +0000 UTC m=+1278.514446985" watchObservedRunningTime="2026-01-21 11:16:26.986523396 +0000 UTC m=+1278.590415340" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.248518 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" podStartSLOduration=5.113234357 podStartE2EDuration="17.248497207s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.441350071 +0000 UTC m=+1265.045242005" lastFinishedPulling="2026-01-21 11:16:25.576612921 +0000 UTC m=+1277.180504855" observedRunningTime="2026-01-21 11:16:27.18009915 +0000 UTC m=+1278.783991084" watchObservedRunningTime="2026-01-21 11:16:27.248497207 +0000 UTC m=+1278.852389131" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.260012 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" podStartSLOduration=5.082399704 podStartE2EDuration="17.259988941s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.398913401 +0000 UTC m=+1265.002805335" lastFinishedPulling="2026-01-21 11:16:25.576502638 +0000 UTC m=+1277.180394572" observedRunningTime="2026-01-21 11:16:27.243475407 +0000 UTC m=+1278.847367341" watchObservedRunningTime="2026-01-21 11:16:27.259988941 +0000 UTC m=+1278.863880865" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.290528 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" podStartSLOduration=5.124465004 podStartE2EDuration="17.290505725s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.409698484 +0000 UTC m=+1265.013590418" lastFinishedPulling="2026-01-21 11:16:25.575739205 +0000 UTC m=+1277.179631139" observedRunningTime="2026-01-21 11:16:27.289570337 +0000 UTC m=+1278.893462271" watchObservedRunningTime="2026-01-21 
11:16:27.290505725 +0000 UTC m=+1278.894397659" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.340772 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" podStartSLOduration=4.375252867 podStartE2EDuration="17.340752679s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:12.57205542 +0000 UTC m=+1264.175947354" lastFinishedPulling="2026-01-21 11:16:25.537555232 +0000 UTC m=+1277.141447166" observedRunningTime="2026-01-21 11:16:27.33644808 +0000 UTC m=+1278.940340014" watchObservedRunningTime="2026-01-21 11:16:27.340752679 +0000 UTC m=+1278.944644613" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.448896 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" podStartSLOduration=4.731895144 podStartE2EDuration="17.448878476s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:12.883123232 +0000 UTC m=+1264.487015166" lastFinishedPulling="2026-01-21 11:16:25.600106564 +0000 UTC m=+1277.203998498" observedRunningTime="2026-01-21 11:16:27.447129143 +0000 UTC m=+1279.051021077" watchObservedRunningTime="2026-01-21 11:16:27.448878476 +0000 UTC m=+1279.052770410" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.449354 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" podStartSLOduration=4.277694451 podStartE2EDuration="16.44934661s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.398072876 +0000 UTC m=+1265.001964810" lastFinishedPulling="2026-01-21 11:16:25.569725035 +0000 UTC m=+1277.173616969" observedRunningTime="2026-01-21 11:16:27.393654613 +0000 UTC m=+1278.997546547" watchObservedRunningTime="2026-01-21 11:16:27.44934661 +0000 UTC m=+1279.053238534" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.486015 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" podStartSLOduration=4.749741068 podStartE2EDuration="17.485990157s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:12.866201865 +0000 UTC m=+1264.470093799" lastFinishedPulling="2026-01-21 11:16:25.600358992 +0000 UTC m=+1277.206342888" observedRunningTime="2026-01-21 11:16:27.483024438 +0000 UTC m=+1279.086916372" watchObservedRunningTime="2026-01-21 11:16:27.485990157 +0000 UTC m=+1279.089882091" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.533988 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:27 crc kubenswrapper[4925]: E0121 11:16:27.534260 4925 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:27 crc kubenswrapper[4925]: E0121 11:16:27.534318 4925 nestedpendingoperations.go:348] Operation for 
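The pod_startup_latency_tracker records carry their own arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is the same interval with the image-pull window (lastFinishedPulling minus firstStartedPulling) subtracted, i.e. startup latency excluding pull time. The barbican record checks out: 16.675848726s - (25.537568933s - 12.57204492s) = 3.710324713s. A quick verification in Go with the timestamps copied from that record:

    package main

    import (
    	"fmt"
    	"time"
    )

    func mustParse(s string) time.Time {
    	// Layout matches the kubelet's "2026-01-21 11:16:12.57204492 +0000 UTC" form.
    	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
    	if err != nil {
    		panic(err)
    	}
    	return t
    }

    func main() {
    	created := mustParse("2026-01-21 11:16:10 +0000 UTC")
    	firstPull := mustParse("2026-01-21 11:16:12.57204492 +0000 UTC")
    	lastPull := mustParse("2026-01-21 11:16:25.537568933 +0000 UTC")
    	observed := mustParse("2026-01-21 11:16:26.675848726 +0000 UTC")

    	e2e := observed.Sub(created)         // podStartE2EDuration = 16.675848726s
    	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration = 3.710324713s
    	fmt.Println(e2e, slo)
    }

The ~13s pull window in every one of these records is the QPS-throttled image pull from earlier in the log, which is why the SLO figures stay in the 3-5s range.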
"{volumeName:kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert podName:05db7c08-87f6-4518-8d61-c87cbf0b1735 nodeName:}" failed. No retries permitted until 2026-01-21 11:16:43.534300703 +0000 UTC m=+1295.138192637 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" (UID: "05db7c08-87f6-4518-8d61-c87cbf0b1735") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.560249 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" podStartSLOduration=4.3926680430000005 podStartE2EDuration="16.560226579s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.409848719 +0000 UTC m=+1265.013740653" lastFinishedPulling="2026-01-21 11:16:25.577407265 +0000 UTC m=+1277.181299189" observedRunningTime="2026-01-21 11:16:27.532933212 +0000 UTC m=+1279.136825166" watchObservedRunningTime="2026-01-21 11:16:27.560226579 +0000 UTC m=+1279.164118513" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.774095 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:27 crc kubenswrapper[4925]: I0121 11:16:27.774174 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:27 crc kubenswrapper[4925]: E0121 11:16:27.776061 4925 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 21 11:16:27 crc kubenswrapper[4925]: E0121 11:16:27.776182 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:43.776157443 +0000 UTC m=+1295.380049417 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "webhook-server-cert" not found Jan 21 11:16:27 crc kubenswrapper[4925]: E0121 11:16:27.778086 4925 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 21 11:16:27 crc kubenswrapper[4925]: E0121 11:16:27.778147 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs podName:be80c7ef-4f5f-4660-9954-5ab5b34655cf nodeName:}" failed. No retries permitted until 2026-01-21 11:16:43.778129071 +0000 UTC m=+1295.382021005 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs") pod "openstack-operator-controller-manager-87d6d564b-dgm28" (UID: "be80c7ef-4f5f-4660-9954-5ab5b34655cf") : secret "metrics-server-cert" not found Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.103556 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-r4klh" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.218902 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-gcxp4" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.334993 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-vmg65" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.371851 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mjg4d" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.401319 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-h9szq" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.418816 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-c6994669c-ggpw9" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.467119 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-2znsh" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.606449 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-wdwvl" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.654085 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-x7474" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.706644 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-b4cd2" Jan 21 11:16:31 crc kubenswrapper[4925]: I0121 11:16:31.829332 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-84spn" Jan 21 11:16:32 crc kubenswrapper[4925]: I0121 11:16:32.189306 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-t9fng" Jan 21 11:16:32 crc kubenswrapper[4925]: I0121 11:16:32.190057 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-l9f98" Jan 21 11:16:32 crc kubenswrapper[4925]: I0121 11:16:32.459370 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.735300 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" 
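[Editor's note] For readers skimming this capture: every kubenswrapper record carries a standard klog header, namely a severity letter (I=info, W=warning, E=error), an MMDD date, a wall-clock time with microseconds, the emitting PID (4925, matching kubenswrapper[4925]), and the source file:line, followed by the message. A minimal Go parser for that header (field names are mine; a sketch rather than kubelet tooling):

```go
package main

import (
	"fmt"
	"regexp"
)

// klogHeader matches entries like:
//   E0121 11:16:27.778086 4925 secret.go:188] Couldn't get secret ...
// Severity is one of I, W, E, F (info, warning, error, fatal).
var klogHeader = regexp.MustCompile(
	`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+) ([\w.]+:\d+)\] (.*)`)

func main() {
	line := `E0121 11:16:27.778086 4925 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found`
	m := klogHeader.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s src=%s\nmsg=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6])
}
```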
event={"ID":"cc5d8922-f54d-42a1-b23a-622329e3f644","Type":"ContainerStarted","Data":"a22b183fd18228509909b3f2d09b22bcea4564a161526452cbf1c4eeb2a5e681"} Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.737264 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.739782 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" event={"ID":"fd15c43d-a647-467e-a4f1-eb0ca81a123f","Type":"ContainerStarted","Data":"2f48c5db62e879c8370cd7afca7c5a7462a3dbc52cde1f17e55dbfdabdd16556"} Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.740858 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.743937 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" event={"ID":"a7dd34dc-8a69-4c91-88ec-d1d7beffb15d","Type":"ContainerStarted","Data":"ee0adf9ac710cbf44bc104d1d293f3a22087634462d6ebee5baf5919e12f6c5b"} Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.744925 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.747190 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" event={"ID":"a9c52af6-912a-4e93-bbcd-42e961453471","Type":"ContainerStarted","Data":"bdea358eaeec1fa31ccad68709638a590dab9c55387edd0010939a5cfb8d030d"} Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.747728 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.749115 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" event={"ID":"d7429a44-6eeb-419b-8193-29275baf4ad9","Type":"ContainerStarted","Data":"ece9dd5c0f893befa0eda443e44df01560531f5076d2be66655757ddf7a224ed"} Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.749576 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.751197 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" event={"ID":"182d9a34-f024-4a86-8851-9e20d654f4ac","Type":"ContainerStarted","Data":"de9a2f106a27a91370f65cc5106183ab3c5b1ee2a5cd02262d7e51da26d5ae41"} Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.760451 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" podStartSLOduration=3.55843999 podStartE2EDuration="23.760428541s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.448722992 +0000 UTC m=+1265.052614926" lastFinishedPulling="2026-01-21 11:16:33.650711543 +0000 UTC m=+1285.254603477" observedRunningTime="2026-01-21 11:16:34.756461093 +0000 UTC m=+1286.360353037" watchObservedRunningTime="2026-01-21 
11:16:34.760428541 +0000 UTC m=+1286.364320475" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.781673 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" podStartSLOduration=4.5826308860000005 podStartE2EDuration="24.781649737s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.45298858 +0000 UTC m=+1265.056880504" lastFinishedPulling="2026-01-21 11:16:33.652007421 +0000 UTC m=+1285.255899355" observedRunningTime="2026-01-21 11:16:34.776178223 +0000 UTC m=+1286.380070177" watchObservedRunningTime="2026-01-21 11:16:34.781649737 +0000 UTC m=+1286.385541681" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.807529 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-k7r2f" podStartSLOduration=3.568668487 podStartE2EDuration="23.80750819s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.479980888 +0000 UTC m=+1265.083872822" lastFinishedPulling="2026-01-21 11:16:33.718820591 +0000 UTC m=+1285.322712525" observedRunningTime="2026-01-21 11:16:34.799353606 +0000 UTC m=+1286.403245540" watchObservedRunningTime="2026-01-21 11:16:34.80750819 +0000 UTC m=+1286.411400124" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.837232 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" podStartSLOduration=3.634801938 podStartE2EDuration="23.83721213s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.448470985 +0000 UTC m=+1265.052362919" lastFinishedPulling="2026-01-21 11:16:33.650881167 +0000 UTC m=+1285.254773111" observedRunningTime="2026-01-21 11:16:34.818115158 +0000 UTC m=+1286.422007112" watchObservedRunningTime="2026-01-21 11:16:34.83721213 +0000 UTC m=+1286.441104064" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.867119 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" podStartSLOduration=3.57844379 podStartE2EDuration="23.867096524s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.44764237 +0000 UTC m=+1265.051534304" lastFinishedPulling="2026-01-21 11:16:33.736295104 +0000 UTC m=+1285.340187038" observedRunningTime="2026-01-21 11:16:34.842468057 +0000 UTC m=+1286.446360021" watchObservedRunningTime="2026-01-21 11:16:34.867096524 +0000 UTC m=+1286.470988468" Jan 21 11:16:34 crc kubenswrapper[4925]: I0121 11:16:34.874208 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" podStartSLOduration=3.554654788 podStartE2EDuration="23.874187827s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:13.442265929 +0000 UTC m=+1265.046157863" lastFinishedPulling="2026-01-21 11:16:33.761798968 +0000 UTC m=+1285.365690902" observedRunningTime="2026-01-21 11:16:34.864172297 +0000 UTC m=+1286.468064241" watchObservedRunningTime="2026-01-21 11:16:34.874187827 +0000 UTC m=+1286.478079761" Jan 21 11:16:41 crc kubenswrapper[4925]: I0121 11:16:41.790746 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
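[Editor's note] Each "Observed pod startup duration" record reports two figures: podStartE2EDuration (observedRunningTime minus podCreationTimestamp) and podStartSLOduration, which additionally excludes the image-pull window. The nova-operator entry above checks out: 24.781649737s end-to-end, minus a pull window of m=+1285.255899355 less m=+1265.056880504 (20.199018851s), leaves 4.582630886s; the long tail in the logged value (4.5826308860000005) is ordinary float64 noise. The arithmetic, using the monotonic m=+ offsets from that entry (the formula is inferred from the numbers, not quoted from kubelet source):

```go
package main

import "fmt"

func main() {
	// Values from the nova-operator-controller-manager entry above,
	// in seconds on the kubelet's monotonic clock (the m=+ offsets).
	const (
		e2e                 = 24.781649737  // podStartE2EDuration
		firstStartedPulling = 1265.056880504
		lastFinishedPulling = 1285.255899355
	)
	pull := lastFinishedPulling - firstStartedPulling
	slo := e2e - pull
	fmt.Printf("pull=%.9fs slo=%.9fs\n", pull, slo) // slo ~= 4.582630886s
}
```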
pod="openstack-operators/nova-operator-controller-manager-65849867d6-nqldj" Jan 21 11:16:42 crc kubenswrapper[4925]: I0121 11:16:42.010996 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-cq4k9" Jan 21 11:16:42 crc kubenswrapper[4925]: I0121 11:16:42.190675 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-44xwf" Jan 21 11:16:42 crc kubenswrapper[4925]: I0121 11:16:42.205367 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-gcwbr" Jan 21 11:16:42 crc kubenswrapper[4925]: I0121 11:16:42.445960 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-hvtnz" Jan 21 11:16:42 crc kubenswrapper[4925]: I0121 11:16:42.969349 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:42 crc kubenswrapper[4925]: I0121 11:16:42.989767 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dbe9a043-a969-429b-b7b1-33d12296c52c-cert\") pod \"infra-operator-controller-manager-77c48c7859-dqjpf\" (UID: \"dbe9a043-a969-429b-b7b1-33d12296c52c\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.004009 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.445986 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf"] Jan 21 11:16:43 crc kubenswrapper[4925]: W0121 11:16:43.446962 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbe9a043_a969_429b_b7b1_33d12296c52c.slice/crio-8e63667b3fa0133de43de991ebb230a27d7a64c6daffd979d3a1e411534b5425 WatchSource:0}: Error finding container 8e63667b3fa0133de43de991ebb230a27d7a64c6daffd979d3a1e411534b5425: Status 404 returned error can't find the container with id 8e63667b3fa0133de43de991ebb230a27d7a64c6daffd979d3a1e411534b5425 Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.579807 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.585842 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05db7c08-87f6-4518-8d61-c87cbf0b1735-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9\" (UID: \"05db7c08-87f6-4518-8d61-c87cbf0b1735\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.662701 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.782621 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.782712 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.788317 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-metrics-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.788743 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/be80c7ef-4f5f-4660-9954-5ab5b34655cf-webhook-certs\") pod \"openstack-operator-controller-manager-87d6d564b-dgm28\" (UID: \"be80c7ef-4f5f-4660-9954-5ab5b34655cf\") " pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:43 crc kubenswrapper[4925]: I0121 11:16:43.976549 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:44 crc kubenswrapper[4925]: I0121 11:16:44.296160 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" event={"ID":"dbe9a043-a969-429b-b7b1-33d12296c52c","Type":"ContainerStarted","Data":"8e63667b3fa0133de43de991ebb230a27d7a64c6daffd979d3a1e411534b5425"} Jan 21 11:16:44 crc kubenswrapper[4925]: I0121 11:16:44.324518 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9"] Jan 21 11:16:44 crc kubenswrapper[4925]: W0121 11:16:44.674107 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe80c7ef_4f5f_4660_9954_5ab5b34655cf.slice/crio-1c85503ca6fd6c62961f6ba43019eb366e1997bdaba604bf7b17c7aa347944b8 WatchSource:0}: Error finding container 1c85503ca6fd6c62961f6ba43019eb366e1997bdaba604bf7b17c7aa347944b8: Status 404 returned error can't find the container with id 1c85503ca6fd6c62961f6ba43019eb366e1997bdaba604bf7b17c7aa347944b8 Jan 21 11:16:44 crc kubenswrapper[4925]: I0121 11:16:44.677606 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28"] Jan 21 11:16:45 crc kubenswrapper[4925]: I0121 11:16:45.305462 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" event={"ID":"be80c7ef-4f5f-4660-9954-5ab5b34655cf","Type":"ContainerStarted","Data":"1c85503ca6fd6c62961f6ba43019eb366e1997bdaba604bf7b17c7aa347944b8"} Jan 21 11:16:45 crc kubenswrapper[4925]: I0121 11:16:45.306182 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" event={"ID":"05db7c08-87f6-4518-8d61-c87cbf0b1735","Type":"ContainerStarted","Data":"55dfe476772d30db8a782a63452db4bed1b0461598d1255c2d7ae7b431ea695e"} Jan 21 11:16:49 crc kubenswrapper[4925]: I0121 11:16:49.941212 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:16:49 crc kubenswrapper[4925]: I0121 11:16:49.941668 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:16:54 crc kubenswrapper[4925]: I0121 11:16:54.394329 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" event={"ID":"be80c7ef-4f5f-4660-9954-5ab5b34655cf","Type":"ContainerStarted","Data":"d20ea809486eb62c11416c3a976ef9063a0ab3a19d00d5c9973a4a50f6332f67"} Jan 21 11:16:54 crc kubenswrapper[4925]: I0121 11:16:54.395059 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:16:54 crc kubenswrapper[4925]: I0121 11:16:54.422069 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" podStartSLOduration=43.422049689 podStartE2EDuration="43.422049689s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:16:54.421680267 +0000 UTC m=+1306.025572201" watchObservedRunningTime="2026-01-21 11:16:54.422049689 +0000 UTC m=+1306.025941623" Jan 21 11:16:55 crc kubenswrapper[4925]: I0121 11:16:55.409830 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" event={"ID":"05db7c08-87f6-4518-8d61-c87cbf0b1735","Type":"ContainerStarted","Data":"d0493423c8f424a87373b9b86596b5b6c32d9588ddae41fe8a0ec78c349ec3d2"} Jan 21 11:16:55 crc kubenswrapper[4925]: I0121 11:16:55.410208 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:16:55 crc kubenswrapper[4925]: I0121 11:16:55.412085 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" event={"ID":"dbe9a043-a969-429b-b7b1-33d12296c52c","Type":"ContainerStarted","Data":"979f36486be8757006ee2ab777d1fbd9e2cdd760f8f25e4c478be99dbc2beb66"} Jan 21 11:16:55 crc kubenswrapper[4925]: I0121 11:16:55.412512 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:16:55 crc kubenswrapper[4925]: I0121 11:16:55.439562 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" podStartSLOduration=33.933358371 podStartE2EDuration="44.439539967s" podCreationTimestamp="2026-01-21 11:16:11 +0000 UTC" firstStartedPulling="2026-01-21 11:16:44.33571137 +0000 UTC m=+1295.939603304" lastFinishedPulling="2026-01-21 11:16:54.841892966 +0000 UTC m=+1306.445784900" observedRunningTime="2026-01-21 11:16:55.438197036 +0000 UTC m=+1307.042088970" watchObservedRunningTime="2026-01-21 11:16:55.439539967 +0000 UTC m=+1307.043431901" Jan 21 11:16:55 crc kubenswrapper[4925]: I0121 11:16:55.468875 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" podStartSLOduration=34.068413138 podStartE2EDuration="45.468849324s" podCreationTimestamp="2026-01-21 11:16:10 +0000 UTC" firstStartedPulling="2026-01-21 11:16:43.450024407 +0000 UTC m=+1295.053916341" lastFinishedPulling="2026-01-21 11:16:54.850460593 +0000 UTC m=+1306.454352527" observedRunningTime="2026-01-21 11:16:55.462925926 +0000 UTC m=+1307.066817860" watchObservedRunningTime="2026-01-21 11:16:55.468849324 +0000 UTC m=+1307.072741258" Jan 21 11:17:03 crc kubenswrapper[4925]: I0121 11:17:03.012001 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" Jan 21 11:17:03 crc kubenswrapper[4925]: I0121 11:17:03.669318 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9" Jan 21 11:17:03 crc kubenswrapper[4925]: I0121 11:17:03.984826 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/openstack-operator-controller-manager-87d6d564b-dgm28" Jan 21 11:17:11 crc kubenswrapper[4925]: I0121 11:17:11.216169 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4"] Jan 21 11:17:11 crc kubenswrapper[4925]: I0121 11:17:11.217098 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" podUID="36235ffd-d8a9-4e8f-91ef-8c989efca81a" containerName="operator" containerID="cri-o://b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454" gracePeriod=10 Jan 21 11:17:11 crc kubenswrapper[4925]: I0121 11:17:11.224114 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2"] Jan 21 11:17:11 crc kubenswrapper[4925]: I0121 11:17:11.224382 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" podUID="2775a0a7-d5b2-428f-ab41-9057fed196a2" containerName="manager" containerID="cri-o://9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305" gracePeriod=10 Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.333663 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.339364 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.522484 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9znj\" (UniqueName: \"kubernetes.io/projected/36235ffd-d8a9-4e8f-91ef-8c989efca81a-kube-api-access-g9znj\") pod \"36235ffd-d8a9-4e8f-91ef-8c989efca81a\" (UID: \"36235ffd-d8a9-4e8f-91ef-8c989efca81a\") " Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.522662 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w24j\" (UniqueName: \"kubernetes.io/projected/2775a0a7-d5b2-428f-ab41-9057fed196a2-kube-api-access-2w24j\") pod \"2775a0a7-d5b2-428f-ab41-9057fed196a2\" (UID: \"2775a0a7-d5b2-428f-ab41-9057fed196a2\") " Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.529155 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36235ffd-d8a9-4e8f-91ef-8c989efca81a-kube-api-access-g9znj" (OuterVolumeSpecName: "kube-api-access-g9znj") pod "36235ffd-d8a9-4e8f-91ef-8c989efca81a" (UID: "36235ffd-d8a9-4e8f-91ef-8c989efca81a"). InnerVolumeSpecName "kube-api-access-g9znj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.529239 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2775a0a7-d5b2-428f-ab41-9057fed196a2-kube-api-access-2w24j" (OuterVolumeSpecName: "kube-api-access-2w24j") pod "2775a0a7-d5b2-428f-ab41-9057fed196a2" (UID: "2775a0a7-d5b2-428f-ab41-9057fed196a2"). InnerVolumeSpecName "kube-api-access-2w24j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.624751 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w24j\" (UniqueName: \"kubernetes.io/projected/2775a0a7-d5b2-428f-ab41-9057fed196a2-kube-api-access-2w24j\") on node \"crc\" DevicePath \"\"" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.624798 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9znj\" (UniqueName: \"kubernetes.io/projected/36235ffd-d8a9-4e8f-91ef-8c989efca81a-kube-api-access-g9znj\") on node \"crc\" DevicePath \"\"" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.647291 4925 generic.go:334] "Generic (PLEG): container finished" podID="2775a0a7-d5b2-428f-ab41-9057fed196a2" containerID="9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305" exitCode=0 Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.647345 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.647390 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" event={"ID":"2775a0a7-d5b2-428f-ab41-9057fed196a2","Type":"ContainerDied","Data":"9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305"} Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.647498 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2" event={"ID":"2775a0a7-d5b2-428f-ab41-9057fed196a2","Type":"ContainerDied","Data":"7e70f18d27b11acd98e18b2e73fae0edecb5738b0d2567915fd315ad420d01a9"} Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.647540 4925 scope.go:117] "RemoveContainer" containerID="9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.650959 4925 generic.go:334] "Generic (PLEG): container finished" podID="36235ffd-d8a9-4e8f-91ef-8c989efca81a" containerID="b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454" exitCode=0 Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.651037 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" event={"ID":"36235ffd-d8a9-4e8f-91ef-8c989efca81a","Type":"ContainerDied","Data":"b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454"} Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.651269 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" event={"ID":"36235ffd-d8a9-4e8f-91ef-8c989efca81a","Type":"ContainerDied","Data":"abb4d0bd266ab404f97135e3837734fadc4a56fb23355e7395832cad7af2e43b"} Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.651073 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.671749 4925 scope.go:117] "RemoveContainer" containerID="9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305" Jan 21 11:17:12 crc kubenswrapper[4925]: E0121 11:17:12.675922 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305\": container with ID starting with 9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305 not found: ID does not exist" containerID="9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.675967 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305"} err="failed to get container status \"9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305\": rpc error: code = NotFound desc = could not find container \"9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305\": container with ID starting with 9280891388d9a3e6df38c0ccb38b52e8134cc3dad593f1b6c47d2f52a60f2305 not found: ID does not exist" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.675994 4925 scope.go:117] "RemoveContainer" containerID="b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.715915 4925 scope.go:117] "RemoveContainer" containerID="b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454" Jan 21 11:17:12 crc kubenswrapper[4925]: E0121 11:17:12.723424 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454\": container with ID starting with b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454 not found: ID does not exist" containerID="b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.723481 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454"} err="failed to get container status \"b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454\": rpc error: code = NotFound desc = could not find container \"b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454\": container with ID starting with b29840cd77e445fe1107061a4b369f7219cb026336d381e82757ce03ba9a8454 not found: ID does not exist" Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.736174 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2"] Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.743211 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-849fd9b886-62fs2"] Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.882243 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4"] Jan 21 11:17:12 crc kubenswrapper[4925]: I0121 11:17:12.894105 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-controller-init-766b56994f-nkxz4"] Jan 21 
11:17:13 crc kubenswrapper[4925]: I0121 11:17:13.563025 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2775a0a7-d5b2-428f-ab41-9057fed196a2" path="/var/lib/kubelet/pods/2775a0a7-d5b2-428f-ab41-9057fed196a2/volumes" Jan 21 11:17:13 crc kubenswrapper[4925]: I0121 11:17:13.563749 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36235ffd-d8a9-4e8f-91ef-8c989efca81a" path="/var/lib/kubelet/pods/36235ffd-d8a9-4e8f-91ef-8c989efca81a/volumes" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.297243 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-index-lvk6z"] Jan 21 11:17:18 crc kubenswrapper[4925]: E0121 11:17:18.297985 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2775a0a7-d5b2-428f-ab41-9057fed196a2" containerName="manager" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.298001 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="2775a0a7-d5b2-428f-ab41-9057fed196a2" containerName="manager" Jan 21 11:17:18 crc kubenswrapper[4925]: E0121 11:17:18.298028 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36235ffd-d8a9-4e8f-91ef-8c989efca81a" containerName="operator" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.298041 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="36235ffd-d8a9-4e8f-91ef-8c989efca81a" containerName="operator" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.298181 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="2775a0a7-d5b2-428f-ab41-9057fed196a2" containerName="manager" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.298200 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="36235ffd-d8a9-4e8f-91ef-8c989efca81a" containerName="operator" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.298850 4925 util.go:30] "No sandbox for pod can be found. 
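[Editor's note] The span from 11:17:11 to 11:17:13 is a complete pod teardown: SyncLoop DELETE, "Killing container with a grace period", UnmountVolume.TearDown, "Volume detached", the ContainerDied events, RemoveContainer, and finally the orphaned-volume cleanup plus the CPU and memory managers dropping stale per-container state. The two "ContainerStatus from runtime service failed ... NotFound" errors in the middle are harmless: the containers were already gone, and the deletor logs the failed lookup and moves on. Treating NotFound as success is the usual way to keep such cleanup idempotent; a sketch (not kubelet's actual code):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("NotFound: ID does not exist")

// removeContainer deletes a container, treating "already gone"
// as success so that cleanup can be retried safely.
func removeContainer(id string, remove func(string) error) error {
	if err := remove(id); err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	return nil
}

func main() {
	gone := func(string) error { return errNotFound }
	fmt.Println(removeContainer("9280891388d9...", gone)) // <nil>
}
```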
Need to start a new one" pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.302470 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-index-dockercfg-cwpbh" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.316159 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-index-lvk6z"] Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.445249 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9fnt\" (UniqueName: \"kubernetes.io/projected/2ac527d2-36c7-40bc-ae76-8d007f3dadb3-kube-api-access-k9fnt\") pod \"watcher-operator-index-lvk6z\" (UID: \"2ac527d2-36c7-40bc-ae76-8d007f3dadb3\") " pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.547023 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9fnt\" (UniqueName: \"kubernetes.io/projected/2ac527d2-36c7-40bc-ae76-8d007f3dadb3-kube-api-access-k9fnt\") pod \"watcher-operator-index-lvk6z\" (UID: \"2ac527d2-36c7-40bc-ae76-8d007f3dadb3\") " pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.567674 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9fnt\" (UniqueName: \"kubernetes.io/projected/2ac527d2-36c7-40bc-ae76-8d007f3dadb3-kube-api-access-k9fnt\") pod \"watcher-operator-index-lvk6z\" (UID: \"2ac527d2-36c7-40bc-ae76-8d007f3dadb3\") " pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.625504 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:18 crc kubenswrapper[4925]: I0121 11:17:18.933790 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-index-lvk6z"] Jan 21 11:17:19 crc kubenswrapper[4925]: I0121 11:17:19.717083 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-index-lvk6z" event={"ID":"2ac527d2-36c7-40bc-ae76-8d007f3dadb3","Type":"ContainerStarted","Data":"6f033128e5667490a21e90912fdd7af33870760b02388f2ef2df39be141acfd3"} Jan 21 11:17:19 crc kubenswrapper[4925]: I0121 11:17:19.942106 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:17:19 crc kubenswrapper[4925]: I0121 11:17:19.943142 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:17:19 crc kubenswrapper[4925]: I0121 11:17:19.943238 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:17:19 crc kubenswrapper[4925]: I0121 11:17:19.945844 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"67d412d76a3774c8b426878268b1816585378c0b05acfee3e5041ad5e7dbd93a"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:17:19 crc kubenswrapper[4925]: I0121 11:17:19.946013 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://67d412d76a3774c8b426878268b1816585378c0b05acfee3e5041ad5e7dbd93a" gracePeriod=600 Jan 21 11:17:20 crc kubenswrapper[4925]: I0121 11:17:20.742834 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="67d412d76a3774c8b426878268b1816585378c0b05acfee3e5041ad5e7dbd93a" exitCode=0 Jan 21 11:17:20 crc kubenswrapper[4925]: I0121 11:17:20.742937 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"67d412d76a3774c8b426878268b1816585378c0b05acfee3e5041ad5e7dbd93a"} Jan 21 11:17:20 crc kubenswrapper[4925]: I0121 11:17:20.743426 4925 scope.go:117] "RemoveContainer" containerID="e772253e4c2e0ac8edf4468d742ee24fdcac170b16df83d5dd4bb209eb0b7a25" Jan 21 11:17:21 crc kubenswrapper[4925]: I0121 11:17:21.754965 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-index-lvk6z" event={"ID":"2ac527d2-36c7-40bc-ae76-8d007f3dadb3","Type":"ContainerStarted","Data":"85822b1c159663c82c4d0401d5021acfd4f96b7085713369a3a3583d54e78387"} Jan 21 11:17:21 crc kubenswrapper[4925]: I0121 11:17:21.758346 4925 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"6fb1cacdd241e7a8efac0b528deff5f04d57c5b631c8479e71c5d41a4ae7e250"} Jan 21 11:17:21 crc kubenswrapper[4925]: I0121 11:17:21.773886 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-index-lvk6z" podStartSLOduration=1.963921765 podStartE2EDuration="3.773864354s" podCreationTimestamp="2026-01-21 11:17:18 +0000 UTC" firstStartedPulling="2026-01-21 11:17:18.949693115 +0000 UTC m=+1330.553585049" lastFinishedPulling="2026-01-21 11:17:20.759635704 +0000 UTC m=+1332.363527638" observedRunningTime="2026-01-21 11:17:21.772232026 +0000 UTC m=+1333.376123960" watchObservedRunningTime="2026-01-21 11:17:21.773864354 +0000 UTC m=+1333.377756298" Jan 21 11:17:28 crc kubenswrapper[4925]: I0121 11:17:28.626643 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:28 crc kubenswrapper[4925]: I0121 11:17:28.627291 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:28 crc kubenswrapper[4925]: I0121 11:17:28.664499 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:28 crc kubenswrapper[4925]: I0121 11:17:28.851283 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-index-lvk6z" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.576630 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx"] Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.578736 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.580946 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-6vm2t" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.626956 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx"] Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.711486 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2pdv\" (UniqueName: \"kubernetes.io/projected/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-kube-api-access-q2pdv\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.711610 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-bundle\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.712215 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-util\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.814125 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-bundle\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.814277 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-util\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.814308 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2pdv\" (UniqueName: \"kubernetes.io/projected/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-kube-api-access-q2pdv\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.815127 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-util\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.815186 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-bundle\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.841550 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2pdv\" (UniqueName: \"kubernetes.io/projected/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-kube-api-access-q2pdv\") pod \"144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:32 crc kubenswrapper[4925]: I0121 11:17:32.900182 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:33 crc kubenswrapper[4925]: I0121 11:17:33.168194 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx"] Jan 21 11:17:33 crc kubenswrapper[4925]: I0121 11:17:33.859691 4925 generic.go:334] "Generic (PLEG): container finished" podID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerID="92b9097a245d9b3ffa4155977d0f41ba4b309a535ef61aca3cedf80e4bca0869" exitCode=0 Jan 21 11:17:33 crc kubenswrapper[4925]: I0121 11:17:33.859915 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" event={"ID":"9c5f0822-d0f9-4273-8e7f-e2f91d277a01","Type":"ContainerDied","Data":"92b9097a245d9b3ffa4155977d0f41ba4b309a535ef61aca3cedf80e4bca0869"} Jan 21 11:17:33 crc kubenswrapper[4925]: I0121 11:17:33.860262 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" event={"ID":"9c5f0822-d0f9-4273-8e7f-e2f91d277a01","Type":"ContainerStarted","Data":"4df6af23abfe6b2351c7750dff66aae3960e4b46c42f8d08f0ecbf8e25e11c04"} Jan 21 11:17:34 crc kubenswrapper[4925]: I0121 11:17:34.869289 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" event={"ID":"9c5f0822-d0f9-4273-8e7f-e2f91d277a01","Type":"ContainerStarted","Data":"b6eaa66b7783ed43bf11aa7ca6b170ca2c102f545a8d03d29b829526058d99e6"} Jan 21 11:17:35 crc kubenswrapper[4925]: I0121 11:17:35.879858 4925 generic.go:334] "Generic (PLEG): container finished" podID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerID="b6eaa66b7783ed43bf11aa7ca6b170ca2c102f545a8d03d29b829526058d99e6" exitCode=0 Jan 21 11:17:35 crc kubenswrapper[4925]: I0121 11:17:35.879933 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" 
event={"ID":"9c5f0822-d0f9-4273-8e7f-e2f91d277a01","Type":"ContainerDied","Data":"b6eaa66b7783ed43bf11aa7ca6b170ca2c102f545a8d03d29b829526058d99e6"} Jan 21 11:17:36 crc kubenswrapper[4925]: I0121 11:17:36.890222 4925 generic.go:334] "Generic (PLEG): container finished" podID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerID="42c0513ebf53e6b33565420e7742ff06943da84491d860b54f75d207fd67d9e0" exitCode=0 Jan 21 11:17:36 crc kubenswrapper[4925]: I0121 11:17:36.890327 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" event={"ID":"9c5f0822-d0f9-4273-8e7f-e2f91d277a01","Type":"ContainerDied","Data":"42c0513ebf53e6b33565420e7742ff06943da84491d860b54f75d207fd67d9e0"} Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.232050 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.382729 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-util\") pod \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.382848 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-bundle\") pod \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.382894 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2pdv\" (UniqueName: \"kubernetes.io/projected/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-kube-api-access-q2pdv\") pod \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\" (UID: \"9c5f0822-d0f9-4273-8e7f-e2f91d277a01\") " Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.384247 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-bundle" (OuterVolumeSpecName: "bundle") pod "9c5f0822-d0f9-4273-8e7f-e2f91d277a01" (UID: "9c5f0822-d0f9-4273-8e7f-e2f91d277a01"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.389987 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-kube-api-access-q2pdv" (OuterVolumeSpecName: "kube-api-access-q2pdv") pod "9c5f0822-d0f9-4273-8e7f-e2f91d277a01" (UID: "9c5f0822-d0f9-4273-8e7f-e2f91d277a01"). InnerVolumeSpecName "kube-api-access-q2pdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.396465 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-util" (OuterVolumeSpecName: "util") pod "9c5f0822-d0f9-4273-8e7f-e2f91d277a01" (UID: "9c5f0822-d0f9-4273-8e7f-e2f91d277a01"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.484140 4925 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.484216 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2pdv\" (UniqueName: \"kubernetes.io/projected/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-kube-api-access-q2pdv\") on node \"crc\" DevicePath \"\"" Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.484227 4925 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9c5f0822-d0f9-4273-8e7f-e2f91d277a01-util\") on node \"crc\" DevicePath \"\"" Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.914364 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" event={"ID":"9c5f0822-d0f9-4273-8e7f-e2f91d277a01","Type":"ContainerDied","Data":"4df6af23abfe6b2351c7750dff66aae3960e4b46c42f8d08f0ecbf8e25e11c04"} Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.914764 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4df6af23abfe6b2351c7750dff66aae3960e4b46c42f8d08f0ecbf8e25e11c04" Jan 21 11:17:38 crc kubenswrapper[4925]: I0121 11:17:38.914451 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.495257 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p"] Jan 21 11:17:44 crc kubenswrapper[4925]: E0121 11:17:44.496241 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerName="extract" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.496264 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerName="extract" Jan 21 11:17:44 crc kubenswrapper[4925]: E0121 11:17:44.496281 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerName="pull" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.496290 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerName="pull" Jan 21 11:17:44 crc kubenswrapper[4925]: E0121 11:17:44.496309 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerName="util" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.496317 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerName="util" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.496617 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5f0822-d0f9-4273-8e7f-e2f91d277a01" containerName="extract" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.497437 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.500842 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-zlv74" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.500846 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-service-cert" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.515641 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p"] Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.663564 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-webhook-cert\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.663929 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-apiservice-cert\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.664143 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mn9r\" (UniqueName: \"kubernetes.io/projected/79af5c99-6ebe-404d-aa8d-f940aa7e6345-kube-api-access-2mn9r\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.766035 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mn9r\" (UniqueName: \"kubernetes.io/projected/79af5c99-6ebe-404d-aa8d-f940aa7e6345-kube-api-access-2mn9r\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.766170 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-webhook-cert\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.766251 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-apiservice-cert\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.772704 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-apiservice-cert\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.773249 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-webhook-cert\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.784535 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mn9r\" (UniqueName: \"kubernetes.io/projected/79af5c99-6ebe-404d-aa8d-f940aa7e6345-kube-api-access-2mn9r\") pod \"watcher-operator-controller-manager-7f46b8b4c7-9248p\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:44 crc kubenswrapper[4925]: I0121 11:17:44.834219 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:45 crc kubenswrapper[4925]: I0121 11:17:45.614646 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p"] Jan 21 11:17:45 crc kubenswrapper[4925]: I0121 11:17:45.978928 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" event={"ID":"79af5c99-6ebe-404d-aa8d-f940aa7e6345","Type":"ContainerStarted","Data":"4974eb26d9f16f2da693f12da13787ec049058665630a55fdcf2e5ba2fc9a9d1"} Jan 21 11:17:45 crc kubenswrapper[4925]: I0121 11:17:45.979037 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" event={"ID":"79af5c99-6ebe-404d-aa8d-f940aa7e6345","Type":"ContainerStarted","Data":"b8e15de6c19ac8f936cceebc2a8d8f64fa88a908ffcf85154c07445ca51b86ab"} Jan 21 11:17:45 crc kubenswrapper[4925]: I0121 11:17:45.979082 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:46 crc kubenswrapper[4925]: I0121 11:17:46.001580 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" podStartSLOduration=2.001558083 podStartE2EDuration="2.001558083s" podCreationTimestamp="2026-01-21 11:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:17:46.00114605 +0000 UTC m=+1357.605037994" watchObservedRunningTime="2026-01-21 11:17:46.001558083 +0000 UTC m=+1357.605450007" Jan 21 11:17:54 crc kubenswrapper[4925]: I0121 11:17:54.840077 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.287516 4925 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/watcher-operator-controller-manager-696df99475-8gncw"] Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.295929 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.312156 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-696df99475-8gncw"] Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.357611 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d8433174-98d9-44ec-924f-6fe639538b64-webhook-cert\") pod \"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.357746 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d8433174-98d9-44ec-924f-6fe639538b64-apiservice-cert\") pod \"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.357976 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9njvf\" (UniqueName: \"kubernetes.io/projected/d8433174-98d9-44ec-924f-6fe639538b64-kube-api-access-9njvf\") pod \"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.459724 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9njvf\" (UniqueName: \"kubernetes.io/projected/d8433174-98d9-44ec-924f-6fe639538b64-kube-api-access-9njvf\") pod \"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.459836 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d8433174-98d9-44ec-924f-6fe639538b64-webhook-cert\") pod \"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.459922 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d8433174-98d9-44ec-924f-6fe639538b64-apiservice-cert\") pod \"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.468698 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d8433174-98d9-44ec-924f-6fe639538b64-apiservice-cert\") pod 
\"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.484488 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d8433174-98d9-44ec-924f-6fe639538b64-webhook-cert\") pod \"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.487465 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9njvf\" (UniqueName: \"kubernetes.io/projected/d8433174-98d9-44ec-924f-6fe639538b64-kube-api-access-9njvf\") pod \"watcher-operator-controller-manager-696df99475-8gncw\" (UID: \"d8433174-98d9-44ec-924f-6fe639538b64\") " pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:57 crc kubenswrapper[4925]: I0121 11:17:57.684057 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:58 crc kubenswrapper[4925]: I0121 11:17:58.386800 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-696df99475-8gncw"] Jan 21 11:17:59 crc kubenswrapper[4925]: I0121 11:17:59.320964 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" event={"ID":"d8433174-98d9-44ec-924f-6fe639538b64","Type":"ContainerStarted","Data":"dba6071ac11b4893ba391b1bf8fdd492d04bb22721553803db3b1b52281f884c"} Jan 21 11:17:59 crc kubenswrapper[4925]: I0121 11:17:59.321332 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:17:59 crc kubenswrapper[4925]: I0121 11:17:59.321348 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" event={"ID":"d8433174-98d9-44ec-924f-6fe639538b64","Type":"ContainerStarted","Data":"9658c76dabfb20079a6f1dcc5cf3bf559cd29973fbeea68e66023c901ab62c4b"} Jan 21 11:17:59 crc kubenswrapper[4925]: I0121 11:17:59.349508 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" podStartSLOduration=2.349473531 podStartE2EDuration="2.349473531s" podCreationTimestamp="2026-01-21 11:17:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:17:59.340941376 +0000 UTC m=+1370.944833310" watchObservedRunningTime="2026-01-21 11:17:59.349473531 +0000 UTC m=+1370.953365475" Jan 21 11:18:07 crc kubenswrapper[4925]: I0121 11:18:07.695209 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-696df99475-8gncw" Jan 21 11:18:07 crc kubenswrapper[4925]: I0121 11:18:07.783373 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p"] Jan 21 11:18:07 crc kubenswrapper[4925]: I0121 11:18:07.783669 4925 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" podUID="79af5c99-6ebe-404d-aa8d-f940aa7e6345" containerName="manager" containerID="cri-o://4974eb26d9f16f2da693f12da13787ec049058665630a55fdcf2e5ba2fc9a9d1" gracePeriod=10 Jan 21 11:18:08 crc kubenswrapper[4925]: I0121 11:18:08.640738 4925 generic.go:334] "Generic (PLEG): container finished" podID="79af5c99-6ebe-404d-aa8d-f940aa7e6345" containerID="4974eb26d9f16f2da693f12da13787ec049058665630a55fdcf2e5ba2fc9a9d1" exitCode=0 Jan 21 11:18:08 crc kubenswrapper[4925]: I0121 11:18:08.640827 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" event={"ID":"79af5c99-6ebe-404d-aa8d-f940aa7e6345","Type":"ContainerDied","Data":"4974eb26d9f16f2da693f12da13787ec049058665630a55fdcf2e5ba2fc9a9d1"} Jan 21 11:18:08 crc kubenswrapper[4925]: I0121 11:18:08.994565 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.024764 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-apiservice-cert\") pod \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.024861 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mn9r\" (UniqueName: \"kubernetes.io/projected/79af5c99-6ebe-404d-aa8d-f940aa7e6345-kube-api-access-2mn9r\") pod \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.024911 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-webhook-cert\") pod \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\" (UID: \"79af5c99-6ebe-404d-aa8d-f940aa7e6345\") " Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.032849 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "79af5c99-6ebe-404d-aa8d-f940aa7e6345" (UID: "79af5c99-6ebe-404d-aa8d-f940aa7e6345"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.033038 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79af5c99-6ebe-404d-aa8d-f940aa7e6345-kube-api-access-2mn9r" (OuterVolumeSpecName: "kube-api-access-2mn9r") pod "79af5c99-6ebe-404d-aa8d-f940aa7e6345" (UID: "79af5c99-6ebe-404d-aa8d-f940aa7e6345"). InnerVolumeSpecName "kube-api-access-2mn9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.041539 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "79af5c99-6ebe-404d-aa8d-f940aa7e6345" (UID: "79af5c99-6ebe-404d-aa8d-f940aa7e6345"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.126622 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mn9r\" (UniqueName: \"kubernetes.io/projected/79af5c99-6ebe-404d-aa8d-f940aa7e6345-kube-api-access-2mn9r\") on node \"crc\" DevicePath \"\"" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.126695 4925 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.126712 4925 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79af5c99-6ebe-404d-aa8d-f940aa7e6345-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.651886 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" event={"ID":"79af5c99-6ebe-404d-aa8d-f940aa7e6345","Type":"ContainerDied","Data":"b8e15de6c19ac8f936cceebc2a8d8f64fa88a908ffcf85154c07445ca51b86ab"} Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.651964 4925 scope.go:117] "RemoveContainer" containerID="4974eb26d9f16f2da693f12da13787ec049058665630a55fdcf2e5ba2fc9a9d1" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.651995 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p" Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.679342 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p"] Jan 21 11:18:09 crc kubenswrapper[4925]: I0121 11:18:09.685838 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f46b8b4c7-9248p"] Jan 21 11:18:11 crc kubenswrapper[4925]: I0121 11:18:11.529113 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79af5c99-6ebe-404d-aa8d-f940aa7e6345" path="/var/lib/kubelet/pods/79af5c99-6ebe-404d-aa8d-f940aa7e6345/volumes" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.887189 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/rabbitmq-server-0"] Jan 21 11:18:20 crc kubenswrapper[4925]: E0121 11:18:20.888237 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79af5c99-6ebe-404d-aa8d-f940aa7e6345" containerName="manager" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.888257 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="79af5c99-6ebe-404d-aa8d-f940aa7e6345" containerName="manager" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.888500 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="79af5c99-6ebe-404d-aa8d-f940aa7e6345" containerName="manager" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.889414 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.892148 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-erlang-cookie" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.892660 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-config-data" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.896282 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-server-dockercfg-vfmn8" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.896633 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-rabbitmq-svc" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.897004 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"openshift-service-ca.crt" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.897700 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-plugins-conf" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.897832 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-server-conf" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.898157 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"kube-root-ca.crt" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.898217 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-default-user" Jan 21 11:18:20 crc kubenswrapper[4925]: I0121 11:18:20.984167 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/rabbitmq-server-0"] Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.076631 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.076702 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.076755 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-config-data\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.076808 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrmkx\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-kube-api-access-jrmkx\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.076857 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.076905 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.077067 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b7c93089-4b7c-45c7-aa48-64622e536032-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.077130 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-73488c04-79bc-4494-8546-14718a42967d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-73488c04-79bc-4494-8546-14718a42967d\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.077190 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.077245 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.077283 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b7c93089-4b7c-45c7-aa48-64622e536032-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179199 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179257 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 
11:18:21.179298 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-config-data\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179328 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrmkx\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-kube-api-access-jrmkx\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179369 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179411 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179444 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b7c93089-4b7c-45c7-aa48-64622e536032-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179464 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-73488c04-79bc-4494-8546-14718a42967d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-73488c04-79bc-4494-8546-14718a42967d\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179491 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179524 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.179599 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b7c93089-4b7c-45c7-aa48-64622e536032-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.180510 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.181074 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.181446 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.183359 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.183984 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7c93089-4b7c-45c7-aa48-64622e536032-config-data\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.185795 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b7c93089-4b7c-45c7-aa48-64622e536032-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.186379 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.187212 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.188932 4925 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.188968 4925 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-73488c04-79bc-4494-8546-14718a42967d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-73488c04-79bc-4494-8546-14718a42967d\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fb89d18829c38569fce1895bbcfebc0106f559d5931dd2b6a918490889810db2/globalmount\"" pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.192610 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b7c93089-4b7c-45c7-aa48-64622e536032-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.200658 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrmkx\" (UniqueName: \"kubernetes.io/projected/b7c93089-4b7c-45c7-aa48-64622e536032-kube-api-access-jrmkx\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.223585 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-73488c04-79bc-4494-8546-14718a42967d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-73488c04-79bc-4494-8546-14718a42967d\") pod \"rabbitmq-server-0\" (UID: \"b7c93089-4b7c-45c7-aa48-64622e536032\") " pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.245587 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/rabbitmq-notifications-server-0"] Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.251738 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.258464 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-notifications-plugins-conf" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.258695 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-notifications-default-user" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.262763 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-notifications-config-data" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.263061 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-notifications-server-dockercfg-x4b9v" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.263257 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"rabbitmq-notifications-erlang-cookie" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.263531 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"rabbitmq-notifications-server-conf" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.263725 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-rabbitmq-notifications-svc" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.282094 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/rabbitmq-notifications-server-0"] Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.299801 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385497 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npxcl\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-kube-api-access-npxcl\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385578 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385631 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8832f6b4-4613-4924-b91a-22540cd3c25b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8832f6b4-4613-4924-b91a-22540cd3c25b\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385675 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " 
pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385707 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4c494924-513c-4575-a9c9-78e15c3751bc-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385749 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385800 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385841 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4c494924-513c-4575-a9c9-78e15c3751bc-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385874 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385907 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.385942 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.487648 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4c494924-513c-4575-a9c9-78e15c3751bc-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488061 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488122 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488202 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488278 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npxcl\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-kube-api-access-npxcl\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488343 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488385 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8832f6b4-4613-4924-b91a-22540cd3c25b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8832f6b4-4613-4924-b91a-22540cd3c25b\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488440 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488466 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4c494924-513c-4575-a9c9-78e15c3751bc-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488498 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " 
pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.488846 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.489650 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.492413 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.494823 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.496139 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c494924-513c-4575-a9c9-78e15c3751bc-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.506526 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.506878 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4c494924-513c-4575-a9c9-78e15c3751bc-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.507051 4925 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.507093 4925 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8832f6b4-4613-4924-b91a-22540cd3c25b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8832f6b4-4613-4924-b91a-22540cd3c25b\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f1834b0820e590433972463231beca02773493ac76130da0daa483a45dfde471/globalmount\"" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.507216 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.521375 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4c494924-513c-4575-a9c9-78e15c3751bc-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.522283 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.543724 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npxcl\" (UniqueName: \"kubernetes.io/projected/4c494924-513c-4575-a9c9-78e15c3751bc-kube-api-access-npxcl\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.578473 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8832f6b4-4613-4924-b91a-22540cd3c25b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8832f6b4-4613-4924-b91a-22540cd3c25b\") pod \"rabbitmq-notifications-server-0\" (UID: \"4c494924-513c-4575-a9c9-78e15c3751bc\") " pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.584026 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:18:21 crc kubenswrapper[4925]: I0121 11:18:21.903937 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/rabbitmq-server-0"] Jan 21 11:18:22 crc kubenswrapper[4925]: W0121 11:18:22.079049 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c494924_513c_4575_a9c9_78e15c3751bc.slice/crio-d7f957284de3ae7f941287e3557e0411998685832f65ad2367a817e14777ff4e WatchSource:0}: Error finding container d7f957284de3ae7f941287e3557e0411998685832f65ad2367a817e14777ff4e: Status 404 returned error can't find the container with id d7f957284de3ae7f941287e3557e0411998685832f65ad2367a817e14777ff4e Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.079691 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/rabbitmq-notifications-server-0"] Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.502177 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/openstack-galera-0"] Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.505928 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.516007 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"openstack-config-data" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.519223 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"openstack-scripts" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.520332 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-galera-openstack-svc" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.520449 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"galera-openstack-dockercfg-hh49s" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.536314 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"combined-ca-bundle" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.539936 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstack-galera-0"] Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.611035 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76fd86c-08d3-47af-af39-e3336a2f5c0b-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.611139 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-kolla-config\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.611169 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5r7c\" (UniqueName: \"kubernetes.io/projected/a76fd86c-08d3-47af-af39-e3336a2f5c0b-kube-api-access-q5r7c\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " 
pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.611282 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.611321 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-fc279448-1682-4346-9953-01e59757f69d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fc279448-1682-4346-9953-01e59757f69d\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.611381 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a76fd86c-08d3-47af-af39-e3336a2f5c0b-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.611573 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a76fd86c-08d3-47af-af39-e3336a2f5c0b-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.611657 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-config-data-default\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.713419 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-config-data-default\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.713520 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76fd86c-08d3-47af-af39-e3336a2f5c0b-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.713564 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-kolla-config\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.713592 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5r7c\" (UniqueName: \"kubernetes.io/projected/a76fd86c-08d3-47af-af39-e3336a2f5c0b-kube-api-access-q5r7c\") pod 
\"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.713675 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.713708 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-fc279448-1682-4346-9953-01e59757f69d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fc279448-1682-4346-9953-01e59757f69d\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.713750 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a76fd86c-08d3-47af-af39-e3336a2f5c0b-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.713814 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a76fd86c-08d3-47af-af39-e3336a2f5c0b-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.714958 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-config-data-default\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.715958 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-kolla-config\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.716868 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a76fd86c-08d3-47af-af39-e3336a2f5c0b-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.718180 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a76fd86c-08d3-47af-af39-e3336a2f5c0b-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.718915 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/memcached-0"] Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.720661 4925 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. 
Skipping MountDevice...
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.720724 4925 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-fc279448-1682-4346-9953-01e59757f69d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fc279448-1682-4346-9953-01e59757f69d\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/74e8c6f380ca5253de393b332bd49e2c2fb309ef6ba87b78afe322be82470cff/globalmount\"" pod="watcher-kuttl-default/openstack-galera-0"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.723652 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a76fd86c-08d3-47af-af39-e3336a2f5c0b-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.727793 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/memcached-0"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.731140 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"memcached-config-data"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.732169 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-memcached-svc"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.732338 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"memcached-memcached-dockercfg-9c69d"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.737599 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76fd86c-08d3-47af-af39-e3336a2f5c0b-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.756526 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5r7c\" (UniqueName: \"kubernetes.io/projected/a76fd86c-08d3-47af-af39-e3336a2f5c0b-kube-api-access-q5r7c\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.757087 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/memcached-0"]
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.775607 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-fc279448-1682-4346-9953-01e59757f69d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fc279448-1682-4346-9953-01e59757f69d\") pod \"openstack-galera-0\" (UID: \"a76fd86c-08d3-47af-af39-e3336a2f5c0b\") " pod="watcher-kuttl-default/openstack-galera-0"
Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.783591 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" event={"ID":"4c494924-513c-4575-a9c9-78e15c3751bc","Type":"ContainerStarted","Data":"d7f957284de3ae7f941287e3557e0411998685832f65ad2367a817e14777ff4e"}
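The csi_attacher message above ("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...") is the kubelet reacting to what the node plugin advertises: kubevirt.io.hostpath-provisioner does not implement NodeStageVolume, so the global device-mount step is treated as a no-op and only the per-pod publish (SetUp) runs. A minimal sketch of that capability probe, assuming a hypothetical driver socket path; this is illustrative, not kubelet source:

```go
// Probe a CSI node plugin for STAGE_UNSTAGE_VOLUME, the capability that
// decides whether the kubelet performs NodeStageVolume before publishing.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Hypothetical socket path; real node plugins register under /var/lib/kubelet/plugins/.
	conn, err := grpc.Dial("unix:///var/lib/kubelet/plugins/hostpath.csi/csi.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := csi.NewNodeClient(conn).NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	staged := false
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			staged = true
		}
	}
	// Without this capability the kubelet logs the "Skipping MountDevice" line
	// and goes straight to NodePublishVolume (the per-pod SetUp).
	fmt.Println("STAGE_UNSTAGE_VOLUME:", staged)
}
```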
event={"ID":"b7c93089-4b7c-45c7-aa48-64622e536032","Type":"ContainerStarted","Data":"de0c397b2b0c6f46334c8c2054dba5bd6e332e309bdc6166b2d7766e32cceba3"} Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.818905 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.819072 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kolla-config\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.819457 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.819556 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-config-data\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.819876 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6zmt\" (UniqueName: \"kubernetes.io/projected/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kube-api-access-h6zmt\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.856460 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.922795 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.923419 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kolla-config\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.923484 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.923547 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-config-data\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:22 crc kubenswrapper[4925]: I0121 11:18:22.923604 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6zmt\" (UniqueName: \"kubernetes.io/projected/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kube-api-access-h6zmt\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.053652 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-config-data\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.057875 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kolla-config\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.067848 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.073322 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.105840 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6zmt\" (UniqueName: 
\"kubernetes.io/projected/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kube-api-access-h6zmt\") pod \"memcached-0\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.144865 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/memcached-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.569730 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.570957 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.575213 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"telemetry-ceilometer-dockercfg-njc8x" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.580388 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.814974 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjwr6\" (UniqueName: \"kubernetes.io/projected/39b2180d-2f0e-472f-937f-3b25cf112bae-kube-api-access-hjwr6\") pod \"kube-state-metrics-0\" (UID: \"39b2180d-2f0e-472f-937f-3b25cf112bae\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.935118 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjwr6\" (UniqueName: \"kubernetes.io/projected/39b2180d-2f0e-472f-937f-3b25cf112bae-kube-api-access-hjwr6\") pod \"kube-state-metrics-0\" (UID: \"39b2180d-2f0e-472f-937f-3b25cf112bae\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:18:23 crc kubenswrapper[4925]: I0121 11:18:23.987233 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjwr6\" (UniqueName: \"kubernetes.io/projected/39b2180d-2f0e-472f-937f-3b25cf112bae-kube-api-access-hjwr6\") pod \"kube-state-metrics-0\" (UID: \"39b2180d-2f0e-472f-937f-3b25cf112bae\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:18:24 crc kubenswrapper[4925]: I0121 11:18:24.351092 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:18:24 crc kubenswrapper[4925]: I0121 11:18:24.437986 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstack-galera-0"] Jan 21 11:18:24 crc kubenswrapper[4925]: I0121 11:18:24.652778 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/memcached-0"] Jan 21 11:18:24 crc kubenswrapper[4925]: I0121 11:18:24.906230 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstack-galera-0" event={"ID":"a76fd86c-08d3-47af-af39-e3336a2f5c0b","Type":"ContainerStarted","Data":"95f5568eb95164258e7754431f27006aa17907216ffb0aa7ef6c6d4b903fa38d"} Jan 21 11:18:24 crc kubenswrapper[4925]: I0121 11:18:24.952070 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"fcfc5c97-6b6f-41b2-8c2b-265e178b2645","Type":"ContainerStarted","Data":"37844f5d7f27bf4321d9749cc8db68f970d3f95cdc48164e8f42e33f67eb9471"} Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.395150 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.448412 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.472311 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.472690 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-web-config" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.472767 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.472899 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"prometheus-metric-storage-rulefiles-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.472978 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"prometheus-metric-storage-rulefiles-2" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.473053 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-tls-assets-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.473299 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"metric-storage-prometheus-dockercfg-6kxpc" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.506502 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"prometheus-metric-storage-rulefiles-1" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.546450 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580031 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 
21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580090 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5d57ee6d-3979-4874-9325-3922afacba25-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580113 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580301 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580348 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-config\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580372 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580414 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580444 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7wfp\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-kube-api-access-j7wfp\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580487 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.580504 
4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682581 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682655 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5d57ee6d-3979-4874-9325-3922afacba25-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682679 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682711 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682746 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-config\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682769 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682913 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682944 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7wfp\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-kube-api-access-j7wfp\") pod 
\"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.682984 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.683007 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.692746 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.692870 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.695925 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.712504 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.714986 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5d57ee6d-3979-4874-9325-3922afacba25-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.716200 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc 
kubenswrapper[4925]: I0121 11:18:25.716270 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.765851 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.771370 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-config\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.778381 4925 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.778449 4925 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/96a0132b9b32581b5fb9ddbb16c4fadae4ddfa3fdc0501538288252f0717dbd1/globalmount\"" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:25 crc kubenswrapper[4925]: I0121 11:18:25.784574 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7wfp\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-kube-api-access-j7wfp\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.162431 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"39b2180d-2f0e-472f-937f-3b25cf112bae","Type":"ContainerStarted","Data":"a81a8ddc36d261c2b07ae470ecc2c97c58e43ed3916648cba2f02ee6f548e67e"} Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.186265 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s"] Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.193462 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.201095 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.204846 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-tg9v4" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.364691 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s"] Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.373293 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"prometheus-metric-storage-0\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.384610 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5dc6045-7192-42dc-b653-a71b80a9f119-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-mmj2s\" (UID: \"a5dc6045-7192-42dc-b653-a71b80a9f119\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.384691 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rqp2\" (UniqueName: \"kubernetes.io/projected/a5dc6045-7192-42dc-b653-a71b80a9f119-kube-api-access-7rqp2\") pod \"observability-ui-dashboards-66cbf594b5-mmj2s\" (UID: \"a5dc6045-7192-42dc-b653-a71b80a9f119\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.415352 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.486349 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5dc6045-7192-42dc-b653-a71b80a9f119-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-mmj2s\" (UID: \"a5dc6045-7192-42dc-b653-a71b80a9f119\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s" Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.486453 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rqp2\" (UniqueName: \"kubernetes.io/projected/a5dc6045-7192-42dc-b653-a71b80a9f119-kube-api-access-7rqp2\") pod \"observability-ui-dashboards-66cbf594b5-mmj2s\" (UID: \"a5dc6045-7192-42dc-b653-a71b80a9f119\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s" Jan 21 11:18:26 crc kubenswrapper[4925]: E0121 11:18:26.487217 4925 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Jan 21 11:18:26 crc kubenswrapper[4925]: E0121 11:18:26.487298 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a5dc6045-7192-42dc-b653-a71b80a9f119-serving-cert podName:a5dc6045-7192-42dc-b653-a71b80a9f119 nodeName:}" failed. 
No retries permitted until 2026-01-21 11:18:26.987267758 +0000 UTC m=+1398.591159692 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/a5dc6045-7192-42dc-b653-a71b80a9f119-serving-cert") pod "observability-ui-dashboards-66cbf594b5-mmj2s" (UID: "a5dc6045-7192-42dc-b653-a71b80a9f119") : secret "observability-ui-dashboards" not found
Jan 21 11:18:26 crc kubenswrapper[4925]: I0121 11:18:26.529443 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rqp2\" (UniqueName: \"kubernetes.io/projected/a5dc6045-7192-42dc-b653-a71b80a9f119-kube-api-access-7rqp2\") pod \"observability-ui-dashboards-66cbf594b5-mmj2s\" (UID: \"a5dc6045-7192-42dc-b653-a71b80a9f119\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s"
Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.194015 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5dc6045-7192-42dc-b653-a71b80a9f119-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-mmj2s\" (UID: \"a5dc6045-7192-42dc-b653-a71b80a9f119\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s"
Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.202558 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5dc6045-7192-42dc-b653-a71b80a9f119-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-mmj2s\" (UID: \"a5dc6045-7192-42dc-b653-a71b80a9f119\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s"
Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.210508 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-64479565b-fzhdc"]
Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.211732 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-64479565b-fzhdc"
Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.238086 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-64479565b-fzhdc"]
Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.309563 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s"
Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.368354 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/alertmanager-metric-storage-0"]
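The serving-cert failure above resolves itself about a second later: the Secret did not exist yet when the pod was first synced, so the mount operation was parked with a 500ms durationBeforeRetry, and the retry at 11:18:27.194015 succeeded once the object appeared. A sketch of that retry shape follows; only the 500ms initial delay and the error string are taken from the log, while the doubling factor and the cap are assumptions for illustration:

```go
// Exponential backoff in the style of the kubelet's nested pending
// operations: a failed mount is not retried before a deadline, and the
// delay grows on each failure until the missing object shows up.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotFound = errors.New(`secret "observability-ui-dashboards" not found`)

// mountServingCert stands in for MountVolume.SetUp; here the secret
// "appears" on the third attempt, mirroring the log's eventual success.
func mountServingCert(attempt int) error {
	if attempt < 2 {
		return errNotFound
	}
	return nil
}

func main() {
	delay := 500 * time.Millisecond // durationBeforeRetry seen in the log
	const maxDelay = 2 * time.Minute // assumed cap, not from the log
	for attempt := 0; ; attempt++ {
		if err := mountServingCert(attempt); err == nil {
			fmt.Println("MountVolume.SetUp succeeded")
			return
		} else {
			fmt.Printf("no retries permitted until %s: %v\n",
				time.Now().Add(delay).Format(time.RFC3339), err)
		}
		time.Sleep(delay)
		if delay *= 2; delay > maxDelay {
			delay = maxDelay
		}
	}
}
```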
Need to start a new one" pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.395806 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"alertmanager-metric-storage-tls-assets-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.396181 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"alertmanager-metric-storage-generated" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.396584 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"metric-storage-alertmanager-dockercfg-mgtxw" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.396971 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"alertmanager-metric-storage-cluster-tls-config" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.397328 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"alertmanager-metric-storage-web-config" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.414472 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-service-ca\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.414559 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-console-config\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.414598 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4c315b0b-2610-4eb5-822a-2cef565890d1-console-oauth-config\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.414634 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-trusted-ca-bundle\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.414677 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c315b0b-2610-4eb5-822a-2cef565890d1-console-serving-cert\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.414697 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlqll\" (UniqueName: \"kubernetes.io/projected/4c315b0b-2610-4eb5-822a-2cef565890d1-kube-api-access-wlqll\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 
crc kubenswrapper[4925]: I0121 11:18:27.414726 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-oauth-serving-cert\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705580 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/938ddfe0-198f-4050-af00-6c195ffaa41e-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705652 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705682 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c315b0b-2610-4eb5-822a-2cef565890d1-console-serving-cert\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705707 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlqll\" (UniqueName: \"kubernetes.io/projected/4c315b0b-2610-4eb5-822a-2cef565890d1-kube-api-access-wlqll\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705743 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-oauth-serving-cert\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705814 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-service-ca\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705842 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8vpq\" (UniqueName: \"kubernetes.io/projected/938ddfe0-198f-4050-af00-6c195ffaa41e-kube-api-access-l8vpq\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705875 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: 
\"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705915 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/938ddfe0-198f-4050-af00-6c195ffaa41e-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705943 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.705969 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-console-config\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.706011 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4c315b0b-2610-4eb5-822a-2cef565890d1-console-oauth-config\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.706047 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-trusted-ca-bundle\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.706079 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/938ddfe0-198f-4050-af00-6c195ffaa41e-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.718013 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-oauth-serving-cert\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.718361 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-service-ca\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.721590 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-trusted-ca-bundle\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.728001 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4c315b0b-2610-4eb5-822a-2cef565890d1-console-config\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.735059 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c315b0b-2610-4eb5-822a-2cef565890d1-console-serving-cert\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.736237 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4c315b0b-2610-4eb5-822a-2cef565890d1-console-oauth-config\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.759831 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlqll\" (UniqueName: \"kubernetes.io/projected/4c315b0b-2610-4eb5-822a-2cef565890d1-kube-api-access-wlqll\") pod \"console-64479565b-fzhdc\" (UID: \"4c315b0b-2610-4eb5-822a-2cef565890d1\") " pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.795534 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/alertmanager-metric-storage-0"] Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.807659 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/938ddfe0-198f-4050-af00-6c195ffaa41e-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.807735 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.807880 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/938ddfe0-198f-4050-af00-6c195ffaa41e-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.807918 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/938ddfe0-198f-4050-af00-6c195ffaa41e-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " 
pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.808007 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.808093 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8vpq\" (UniqueName: \"kubernetes.io/projected/938ddfe0-198f-4050-af00-6c195ffaa41e-kube-api-access-l8vpq\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.808153 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.810839 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/938ddfe0-198f-4050-af00-6c195ffaa41e-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.818188 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.818830 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.819985 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/938ddfe0-198f-4050-af00-6c195ffaa41e-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.820217 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/938ddfe0-198f-4050-af00-6c195ffaa41e-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.822007 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/938ddfe0-198f-4050-af00-6c195ffaa41e-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: 
\"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.854660 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8vpq\" (UniqueName: \"kubernetes.io/projected/938ddfe0-198f-4050-af00-6c195ffaa41e-kube-api-access-l8vpq\") pod \"alertmanager-metric-storage-0\" (UID: \"938ddfe0-198f-4050-af00-6c195ffaa41e\") " pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:27 crc kubenswrapper[4925]: I0121 11:18:27.922613 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:28 crc kubenswrapper[4925]: I0121 11:18:28.246831 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:18:28 crc kubenswrapper[4925]: I0121 11:18:28.343634 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Jan 21 11:18:29 crc kubenswrapper[4925]: I0121 11:18:29.338033 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerStarted","Data":"6ac6c77d378297990baad20ac16585f2d10620978882efa50e0329ea830da9cd"} Jan 21 11:18:29 crc kubenswrapper[4925]: I0121 11:18:29.896160 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s"] Jan 21 11:18:30 crc kubenswrapper[4925]: W0121 11:18:30.306799 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5dc6045_7192_42dc_b653_a71b80a9f119.slice/crio-7d40dce83bb11cb3b72354a92871da785703784682e36918ddf98e325921c23d WatchSource:0}: Error finding container 7d40dce83bb11cb3b72354a92871da785703784682e36918ddf98e325921c23d: Status 404 returned error can't find the container with id 7d40dce83bb11cb3b72354a92871da785703784682e36918ddf98e325921c23d Jan 21 11:18:30 crc kubenswrapper[4925]: I0121 11:18:30.348055 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-64479565b-fzhdc"] Jan 21 11:18:30 crc kubenswrapper[4925]: I0121 11:18:30.415441 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s" event={"ID":"a5dc6045-7192-42dc-b653-a71b80a9f119","Type":"ContainerStarted","Data":"7d40dce83bb11cb3b72354a92871da785703784682e36918ddf98e325921c23d"} Jan 21 11:18:30 crc kubenswrapper[4925]: I0121 11:18:30.867934 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/alertmanager-metric-storage-0"] Jan 21 11:18:37 crc kubenswrapper[4925]: I0121 11:18:37.178326 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64479565b-fzhdc" event={"ID":"4c315b0b-2610-4eb5-822a-2cef565890d1","Type":"ContainerStarted","Data":"d28af6e0a8725180e1b5d1dad9ccbbd1c0313fb3c86e1aac0f80df7e540966da"} Jan 21 11:18:37 crc kubenswrapper[4925]: I0121 11:18:37.188919 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"938ddfe0-198f-4050-af00-6c195ffaa41e","Type":"ContainerStarted","Data":"be342f7b9909e1205876601daad531d5ae869370d109ef29197047906773b116"} Jan 21 11:18:38 crc kubenswrapper[4925]: I0121 11:18:38.506126 4925 prober.go:107] "Probe 
failed" probeType="Liveness" pod="metallb-system/speaker-zxq6z" podUID="0ac5019d-ffb4-4cb6-9042-1b983b15841a" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 11:18:38 crc kubenswrapper[4925]: I0121 11:18:38.994531 4925 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.460672239s: [/var/lib/containers/storage/overlay/c27d1d6ab03ba695abdcf39a911cdff1a38f06d6c0080f3b476172cd5ded5ffa/diff ]; will not log again for this container unless duration exceeds 2s Jan 21 11:18:39 crc kubenswrapper[4925]: I0121 11:18:39.188553 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-zxq6z" podUID="0ac5019d-ffb4-4cb6-9042-1b983b15841a" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 11:18:39 crc kubenswrapper[4925]: E0121 11:18:39.231924 4925 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.731s" Jan 21 11:18:49 crc kubenswrapper[4925]: I0121 11:18:49.416617 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64479565b-fzhdc" event={"ID":"4c315b0b-2610-4eb5-822a-2cef565890d1","Type":"ContainerStarted","Data":"a8e1c818e85a6b65f3580d0b66efc6855c614c8d5a45b06416e2ec3b85b80893"} Jan 21 11:18:50 crc kubenswrapper[4925]: I0121 11:18:50.447809 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-64479565b-fzhdc" podStartSLOduration=23.447775408 podStartE2EDuration="23.447775408s" podCreationTimestamp="2026-01-21 11:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:18:50.442086958 +0000 UTC m=+1422.045978912" watchObservedRunningTime="2026-01-21 11:18:50.447775408 +0000 UTC m=+1422.051667342" Jan 21 11:18:51 crc kubenswrapper[4925]: E0121 11:18:51.473509 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Jan 21 11:18:51 crc kubenswrapper[4925]: E0121 11:18:51.473887 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- 
/usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n67fh664h79h545h5dfh558hb8h559h548h84h7fh57ch65ch696h5dhf4h58h588hf8h569h8fhbch596h666h6h5b7hb7h9h679h5b6h685h544q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h6zmt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_watcher-kuttl-default(fcfc5c97-6b6f-41b2-8c2b-265e178b2645): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 11:18:51 crc kubenswrapper[4925]: E0121 11:18:51.475193 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="watcher-kuttl-default/memcached-0" podUID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" Jan 21 11:18:52 crc kubenswrapper[4925]: E0121 11:18:52.444815 
4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="watcher-kuttl-default/memcached-0" podUID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" Jan 21 11:18:55 crc kubenswrapper[4925]: I0121 11:18:55.478546 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstack-galera-0" event={"ID":"a76fd86c-08d3-47af-af39-e3336a2f5c0b","Type":"ContainerStarted","Data":"05de8cea18db5294a0fa0b2af153b44b7147c6aff161bdf3b8f6c33d3c930ecc"} Jan 21 11:18:55 crc kubenswrapper[4925]: I0121 11:18:55.482136 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"39b2180d-2f0e-472f-937f-3b25cf112bae","Type":"ContainerStarted","Data":"41f8a4515161523db7491d9b04231422b1096aad3dc3c3a4038c5a7efa0fdd89"} Jan 21 11:18:55 crc kubenswrapper[4925]: I0121 11:18:55.482604 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:18:55 crc kubenswrapper[4925]: I0121 11:18:55.485553 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s" event={"ID":"a5dc6045-7192-42dc-b653-a71b80a9f119","Type":"ContainerStarted","Data":"0b4f969514fc7b9ccc94824eb6214b178766162cbd8567d5af263b0b323a55e8"} Jan 21 11:18:55 crc kubenswrapper[4925]: I0121 11:18:55.524345 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-mmj2s" podStartSLOduration=9.361250343 podStartE2EDuration="29.524319115s" podCreationTimestamp="2026-01-21 11:18:26 +0000 UTC" firstStartedPulling="2026-01-21 11:18:30.353825077 +0000 UTC m=+1401.957717011" lastFinishedPulling="2026-01-21 11:18:50.516893839 +0000 UTC m=+1422.120785783" observedRunningTime="2026-01-21 11:18:55.519786981 +0000 UTC m=+1427.123678905" watchObservedRunningTime="2026-01-21 11:18:55.524319115 +0000 UTC m=+1427.128211049" Jan 21 11:18:55 crc kubenswrapper[4925]: I0121 11:18:55.563894 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/kube-state-metrics-0" podStartSLOduration=4.482802197 podStartE2EDuration="32.563873152s" podCreationTimestamp="2026-01-21 11:18:23 +0000 UTC" firstStartedPulling="2026-01-21 11:18:26.047622853 +0000 UTC m=+1397.651514787" lastFinishedPulling="2026-01-21 11:18:54.128693808 +0000 UTC m=+1425.732585742" observedRunningTime="2026-01-21 11:18:55.555797408 +0000 UTC m=+1427.159689342" watchObservedRunningTime="2026-01-21 11:18:55.563873152 +0000 UTC m=+1427.167765086" Jan 21 11:18:56 crc kubenswrapper[4925]: I0121 11:18:56.496957 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" event={"ID":"4c494924-513c-4575-a9c9-78e15c3751bc","Type":"ContainerStarted","Data":"634f412a8e6c75620fd65280aacb4713c3cb453673d77dfe98e6e42f4cd7bcab"} Jan 21 11:18:56 crc kubenswrapper[4925]: I0121 11:18:56.498983 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-server-0" event={"ID":"b7c93089-4b7c-45c7-aa48-64622e536032","Type":"ContainerStarted","Data":"a6903b72a43df88702f90a449adaa14a7c955fe7feae8acfe98df4e7f68a72f3"} Jan 21 11:18:57 crc kubenswrapper[4925]: I0121 11:18:57.512495 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerStarted","Data":"14e0b16b9dc2cac9eb816364b417409e163edf2189e5dd4e81ada2ffc9f39379"} Jan 21 11:18:57 crc kubenswrapper[4925]: I0121 11:18:57.924522 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:57 crc kubenswrapper[4925]: I0121 11:18:57.924604 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:57 crc kubenswrapper[4925]: I0121 11:18:57.934021 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:58 crc kubenswrapper[4925]: I0121 11:18:58.525221 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-64479565b-fzhdc" Jan 21 11:18:58 crc kubenswrapper[4925]: I0121 11:18:58.588497 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-ddcb56777-sqrzz"] Jan 21 11:19:01 crc kubenswrapper[4925]: I0121 11:19:01.573337 4925 generic.go:334] "Generic (PLEG): container finished" podID="a76fd86c-08d3-47af-af39-e3336a2f5c0b" containerID="05de8cea18db5294a0fa0b2af153b44b7147c6aff161bdf3b8f6c33d3c930ecc" exitCode=0 Jan 21 11:19:01 crc kubenswrapper[4925]: I0121 11:19:01.573836 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstack-galera-0" event={"ID":"a76fd86c-08d3-47af-af39-e3336a2f5c0b","Type":"ContainerDied","Data":"05de8cea18db5294a0fa0b2af153b44b7147c6aff161bdf3b8f6c33d3c930ecc"} Jan 21 11:19:02 crc kubenswrapper[4925]: I0121 11:19:02.585494 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstack-galera-0" event={"ID":"a76fd86c-08d3-47af-af39-e3336a2f5c0b","Type":"ContainerStarted","Data":"8ffeaee5ad265c9df202c07984a3f7667f36ca4b6e23be06db4d00aff43c3a66"} Jan 21 11:19:02 crc kubenswrapper[4925]: I0121 11:19:02.632055 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/openstack-galera-0" podStartSLOduration=11.95055571 podStartE2EDuration="41.632023835s" podCreationTimestamp="2026-01-21 11:18:21 +0000 UTC" firstStartedPulling="2026-01-21 11:18:24.637334953 +0000 UTC m=+1396.241226887" lastFinishedPulling="2026-01-21 11:18:54.318803078 +0000 UTC m=+1425.922695012" observedRunningTime="2026-01-21 11:19:02.608122051 +0000 UTC m=+1434.212013995" watchObservedRunningTime="2026-01-21 11:19:02.632023835 +0000 UTC m=+1434.235915769" Jan 21 11:19:02 crc kubenswrapper[4925]: I0121 11:19:02.857284 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:19:02 crc kubenswrapper[4925]: I0121 11:19:02.857457 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:19:04 crc kubenswrapper[4925]: I0121 11:19:04.503912 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:19:06 crc kubenswrapper[4925]: I0121 11:19:06.623298 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"938ddfe0-198f-4050-af00-6c195ffaa41e","Type":"ContainerStarted","Data":"f92816d514d13e1090ce69645b1fb6306da77c230794db7e3de9dfc34656b395"} Jan 21 11:19:06 crc kubenswrapper[4925]: I0121 
11:19:06.625271 4925 generic.go:334] "Generic (PLEG): container finished" podID="5d57ee6d-3979-4874-9325-3922afacba25" containerID="14e0b16b9dc2cac9eb816364b417409e163edf2189e5dd4e81ada2ffc9f39379" exitCode=0 Jan 21 11:19:06 crc kubenswrapper[4925]: I0121 11:19:06.625297 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerDied","Data":"14e0b16b9dc2cac9eb816364b417409e163edf2189e5dd4e81ada2ffc9f39379"} Jan 21 11:19:07 crc kubenswrapper[4925]: I0121 11:19:07.638587 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"fcfc5c97-6b6f-41b2-8c2b-265e178b2645","Type":"ContainerStarted","Data":"b60e710e30fa24623f902db18ad77e3fe8eb6c4f0f2074eb478050cc60a62331"} Jan 21 11:19:07 crc kubenswrapper[4925]: I0121 11:19:07.639461 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/memcached-0" Jan 21 11:19:07 crc kubenswrapper[4925]: I0121 11:19:07.659252 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/memcached-0" podStartSLOduration=3.160242245 podStartE2EDuration="45.659229705s" podCreationTimestamp="2026-01-21 11:18:22 +0000 UTC" firstStartedPulling="2026-01-21 11:18:24.730555385 +0000 UTC m=+1396.334447319" lastFinishedPulling="2026-01-21 11:19:07.229542845 +0000 UTC m=+1438.833434779" observedRunningTime="2026-01-21 11:19:07.658856453 +0000 UTC m=+1439.262748387" watchObservedRunningTime="2026-01-21 11:19:07.659229705 +0000 UTC m=+1439.263121639" Jan 21 11:19:09 crc kubenswrapper[4925]: I0121 11:19:09.041140 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:19:09 crc kubenswrapper[4925]: I0121 11:19:09.136351 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/openstack-galera-0" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.283716 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/root-account-create-update-d8bm7"] Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.287301 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.290499 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"openstack-mariadb-root-db-secret" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.302577 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/root-account-create-update-d8bm7"] Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.403501 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z7g2\" (UniqueName: \"kubernetes.io/projected/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-kube-api-access-4z7g2\") pod \"root-account-create-update-d8bm7\" (UID: \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\") " pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.403672 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-operator-scripts\") pod \"root-account-create-update-d8bm7\" (UID: \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\") " pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.505239 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z7g2\" (UniqueName: \"kubernetes.io/projected/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-kube-api-access-4z7g2\") pod \"root-account-create-update-d8bm7\" (UID: \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\") " pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.505465 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-operator-scripts\") pod \"root-account-create-update-d8bm7\" (UID: \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\") " pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.507906 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-operator-scripts\") pod \"root-account-create-update-d8bm7\" (UID: \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\") " pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.529380 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z7g2\" (UniqueName: \"kubernetes.io/projected/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-kube-api-access-4z7g2\") pod \"root-account-create-update-d8bm7\" (UID: \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\") " pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:12 crc kubenswrapper[4925]: I0121 11:19:12.681715 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.150155 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/memcached-0" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.221550 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn"] Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.223316 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.231971 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-db-secret" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.235196 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn"] Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.292341 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgdbs\" (UniqueName: \"kubernetes.io/projected/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-kube-api-access-vgdbs\") pod \"keystone-7dc6-account-create-update-7k5hn\" (UID: \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\") " pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.292558 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-operator-scripts\") pod \"keystone-7dc6-account-create-update-7k5hn\" (UID: \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\") " pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.311295 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-db-create-rjq9w"] Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.312867 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.331655 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-db-create-rjq9w"] Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.393720 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-operator-scripts\") pod \"keystone-7dc6-account-create-update-7k5hn\" (UID: \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\") " pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.393833 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-operator-scripts\") pod \"keystone-db-create-rjq9w\" (UID: \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\") " pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.393879 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq7fn\" (UniqueName: \"kubernetes.io/projected/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-kube-api-access-xq7fn\") pod \"keystone-db-create-rjq9w\" (UID: \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\") " pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.394156 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgdbs\" (UniqueName: \"kubernetes.io/projected/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-kube-api-access-vgdbs\") pod \"keystone-7dc6-account-create-update-7k5hn\" (UID: \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\") " pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.396059 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-operator-scripts\") pod \"keystone-7dc6-account-create-update-7k5hn\" (UID: \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\") " pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.437003 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgdbs\" (UniqueName: \"kubernetes.io/projected/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-kube-api-access-vgdbs\") pod \"keystone-7dc6-account-create-update-7k5hn\" (UID: \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\") " pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.497108 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-operator-scripts\") pod \"keystone-db-create-rjq9w\" (UID: \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\") " pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.497201 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq7fn\" (UniqueName: \"kubernetes.io/projected/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-kube-api-access-xq7fn\") pod \"keystone-db-create-rjq9w\" (UID: 
\"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\") " pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.497929 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-operator-scripts\") pod \"keystone-db-create-rjq9w\" (UID: \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\") " pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.519028 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq7fn\" (UniqueName: \"kubernetes.io/projected/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-kube-api-access-xq7fn\") pod \"keystone-db-create-rjq9w\" (UID: \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\") " pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.548272 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:13 crc kubenswrapper[4925]: I0121 11:19:13.633232 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:14 crc kubenswrapper[4925]: I0121 11:19:14.710203 4925 generic.go:334] "Generic (PLEG): container finished" podID="938ddfe0-198f-4050-af00-6c195ffaa41e" containerID="f92816d514d13e1090ce69645b1fb6306da77c230794db7e3de9dfc34656b395" exitCode=0 Jan 21 11:19:14 crc kubenswrapper[4925]: I0121 11:19:14.710263 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"938ddfe0-198f-4050-af00-6c195ffaa41e","Type":"ContainerDied","Data":"f92816d514d13e1090ce69645b1fb6306da77c230794db7e3de9dfc34656b395"} Jan 21 11:19:18 crc kubenswrapper[4925]: I0121 11:19:18.715456 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/root-account-create-update-d8bm7"] Jan 21 11:19:18 crc kubenswrapper[4925]: W0121 11:19:18.719068 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6426427a_7aeb_4bf8_8850_8dd1fbf82adc.slice/crio-b7c2d8c9fb5489958375d344eac97739a209458b50e852b86025c1b3b1900f23 WatchSource:0}: Error finding container b7c2d8c9fb5489958375d344eac97739a209458b50e852b86025c1b3b1900f23: Status 404 returned error can't find the container with id b7c2d8c9fb5489958375d344eac97739a209458b50e852b86025c1b3b1900f23 Jan 21 11:19:18 crc kubenswrapper[4925]: I0121 11:19:18.747998 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/root-account-create-update-d8bm7" event={"ID":"6426427a-7aeb-4bf8-8850-8dd1fbf82adc","Type":"ContainerStarted","Data":"b7c2d8c9fb5489958375d344eac97739a209458b50e852b86025c1b3b1900f23"} Jan 21 11:19:18 crc kubenswrapper[4925]: I0121 11:19:18.750981 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerStarted","Data":"65939efbbe82f15f32b4fe10b8a30abd205cff5b0bef6c72b4a1df0380ea6ef1"} Jan 21 11:19:18 crc kubenswrapper[4925]: I0121 11:19:18.792817 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-db-create-rjq9w"] Jan 21 11:19:18 crc kubenswrapper[4925]: I0121 11:19:18.871638 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn"] Jan 21 11:19:19 crc kubenswrapper[4925]: I0121 11:19:19.768093 4925 generic.go:334] "Generic (PLEG): container finished" podID="46a8fc8b-5efe-47ee-800e-fd7372a3bc4b" containerID="0b9ede477d3ca1e2a2a9d0a1751087b24b6fd77c9a69304789a10f27aab387f0" exitCode=0 Jan 21 11:19:19 crc kubenswrapper[4925]: I0121 11:19:19.768150 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-create-rjq9w" event={"ID":"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b","Type":"ContainerDied","Data":"0b9ede477d3ca1e2a2a9d0a1751087b24b6fd77c9a69304789a10f27aab387f0"} Jan 21 11:19:19 crc kubenswrapper[4925]: I0121 11:19:19.768525 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-create-rjq9w" event={"ID":"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b","Type":"ContainerStarted","Data":"cc608ddc1f5f7ed0e6b58355b46f5078fbd6b5d147e2e79686b7e69626132fc6"} Jan 21 11:19:19 crc kubenswrapper[4925]: I0121 11:19:19.770372 4925 generic.go:334] "Generic (PLEG): container finished" podID="6426427a-7aeb-4bf8-8850-8dd1fbf82adc" containerID="df7b434e83706b5ac0a5c132941d89bc3f8881c1d3b4d6b26e6f9c2d031f03ac" exitCode=0 Jan 21 11:19:19 crc kubenswrapper[4925]: I0121 11:19:19.770448 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/root-account-create-update-d8bm7" event={"ID":"6426427a-7aeb-4bf8-8850-8dd1fbf82adc","Type":"ContainerDied","Data":"df7b434e83706b5ac0a5c132941d89bc3f8881c1d3b4d6b26e6f9c2d031f03ac"} Jan 21 11:19:19 crc kubenswrapper[4925]: I0121 11:19:19.772784 4925 generic.go:334] "Generic (PLEG): container finished" podID="23a3b777-9ddf-4df2-842a-b9e29e1b7aa0" containerID="98bdf7507f13b94744ccd35c4ea0957f32337b9e793f58da78e857377ac49bf5" exitCode=0 Jan 21 11:19:19 crc kubenswrapper[4925]: I0121 11:19:19.772830 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" event={"ID":"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0","Type":"ContainerDied","Data":"98bdf7507f13b94744ccd35c4ea0957f32337b9e793f58da78e857377ac49bf5"} Jan 21 11:19:19 crc kubenswrapper[4925]: I0121 11:19:19.772865 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" event={"ID":"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0","Type":"ContainerStarted","Data":"8d8bd15e3667a7a2930c39c8ab3bc97e9f3e0c20ebc59055c1598fffcb531235"} Jan 21 11:19:20 crc kubenswrapper[4925]: I0121 11:19:20.784478 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"938ddfe0-198f-4050-af00-6c195ffaa41e","Type":"ContainerStarted","Data":"aa4dfc848ca0260bc64604240f6fb9651431189d2990281c8757fc20d9e69fcf"} Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.145006 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.156402 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-operator-scripts\") pod \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\" (UID: \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\") " Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.156509 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4z7g2\" (UniqueName: \"kubernetes.io/projected/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-kube-api-access-4z7g2\") pod \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\" (UID: \"6426427a-7aeb-4bf8-8850-8dd1fbf82adc\") " Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.158471 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6426427a-7aeb-4bf8-8850-8dd1fbf82adc" (UID: "6426427a-7aeb-4bf8-8850-8dd1fbf82adc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.165642 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-kube-api-access-4z7g2" (OuterVolumeSpecName: "kube-api-access-4z7g2") pod "6426427a-7aeb-4bf8-8850-8dd1fbf82adc" (UID: "6426427a-7aeb-4bf8-8850-8dd1fbf82adc"). InnerVolumeSpecName "kube-api-access-4z7g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.257621 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.257662 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4z7g2\" (UniqueName: \"kubernetes.io/projected/6426427a-7aeb-4bf8-8850-8dd1fbf82adc-kube-api-access-4z7g2\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.296704 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.313787 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.358863 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgdbs\" (UniqueName: \"kubernetes.io/projected/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-kube-api-access-vgdbs\") pod \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\" (UID: \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\") " Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.358945 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq7fn\" (UniqueName: \"kubernetes.io/projected/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-kube-api-access-xq7fn\") pod \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\" (UID: \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\") " Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.359036 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-operator-scripts\") pod \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\" (UID: \"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b\") " Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.359069 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-operator-scripts\") pod \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\" (UID: \"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0\") " Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.359647 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "46a8fc8b-5efe-47ee-800e-fd7372a3bc4b" (UID: "46a8fc8b-5efe-47ee-800e-fd7372a3bc4b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.359766 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "23a3b777-9ddf-4df2-842a-b9e29e1b7aa0" (UID: "23a3b777-9ddf-4df2-842a-b9e29e1b7aa0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.362800 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-kube-api-access-vgdbs" (OuterVolumeSpecName: "kube-api-access-vgdbs") pod "23a3b777-9ddf-4df2-842a-b9e29e1b7aa0" (UID: "23a3b777-9ddf-4df2-842a-b9e29e1b7aa0"). InnerVolumeSpecName "kube-api-access-vgdbs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.367581 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-kube-api-access-xq7fn" (OuterVolumeSpecName: "kube-api-access-xq7fn") pod "46a8fc8b-5efe-47ee-800e-fd7372a3bc4b" (UID: "46a8fc8b-5efe-47ee-800e-fd7372a3bc4b"). InnerVolumeSpecName "kube-api-access-xq7fn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.461011 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.461258 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.461273 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgdbs\" (UniqueName: \"kubernetes.io/projected/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0-kube-api-access-vgdbs\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.461287 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq7fn\" (UniqueName: \"kubernetes.io/projected/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b-kube-api-access-xq7fn\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.794778 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-create-rjq9w" event={"ID":"46a8fc8b-5efe-47ee-800e-fd7372a3bc4b","Type":"ContainerDied","Data":"cc608ddc1f5f7ed0e6b58355b46f5078fbd6b5d147e2e79686b7e69626132fc6"} Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.794807 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-create-rjq9w" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.795269 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc608ddc1f5f7ed0e6b58355b46f5078fbd6b5d147e2e79686b7e69626132fc6" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.797857 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/root-account-create-update-d8bm7" event={"ID":"6426427a-7aeb-4bf8-8850-8dd1fbf82adc","Type":"ContainerDied","Data":"b7c2d8c9fb5489958375d344eac97739a209458b50e852b86025c1b3b1900f23"} Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.797913 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7c2d8c9fb5489958375d344eac97739a209458b50e852b86025c1b3b1900f23" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.798009 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/root-account-create-update-d8bm7" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.800850 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" event={"ID":"23a3b777-9ddf-4df2-842a-b9e29e1b7aa0","Type":"ContainerDied","Data":"8d8bd15e3667a7a2930c39c8ab3bc97e9f3e0c20ebc59055c1598fffcb531235"} Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.800907 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d8bd15e3667a7a2930c39c8ab3bc97e9f3e0c20ebc59055c1598fffcb531235" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.800882 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn" Jan 21 11:19:21 crc kubenswrapper[4925]: I0121 11:19:21.805736 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerStarted","Data":"54b32f4d672670129b579755517b120cd949c7c22b12ebd6fa72c2a609cebcd2"} Jan 21 11:19:23 crc kubenswrapper[4925]: I0121 11:19:23.643436 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-ddcb56777-sqrzz" podUID="09c16233-6644-4a24-96d8-69d72c8c921d" containerName="console" containerID="cri-o://7299e6e690be0b64c48b8220ff680a3eddeb29b3f6ed78a0ff31f197ef77e368" gracePeriod=15 Jan 21 11:19:23 crc kubenswrapper[4925]: I0121 11:19:23.835451 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/alertmanager-metric-storage-0" event={"ID":"938ddfe0-198f-4050-af00-6c195ffaa41e","Type":"ContainerStarted","Data":"a448973d891e3c5bc3004ca8c3b188f81583bd4893d8e239b6acc70bb7769db5"} Jan 21 11:19:23 crc kubenswrapper[4925]: I0121 11:19:23.836069 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:19:23 crc kubenswrapper[4925]: I0121 11:19:23.840564 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/alertmanager-metric-storage-0" Jan 21 11:19:23 crc kubenswrapper[4925]: I0121 11:19:23.867749 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/alertmanager-metric-storage-0" podStartSLOduration=13.07848352 podStartE2EDuration="56.867719629s" podCreationTimestamp="2026-01-21 11:18:27 +0000 UTC" firstStartedPulling="2026-01-21 11:18:36.232022555 +0000 UTC m=+1407.835914489" lastFinishedPulling="2026-01-21 11:19:20.021258664 +0000 UTC m=+1451.625150598" observedRunningTime="2026-01-21 11:19:23.864311062 +0000 UTC m=+1455.468203016" watchObservedRunningTime="2026-01-21 11:19:23.867719629 +0000 UTC m=+1455.471611563" Jan 21 11:19:24 crc kubenswrapper[4925]: I0121 11:19:24.846950 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-ddcb56777-sqrzz_09c16233-6644-4a24-96d8-69d72c8c921d/console/0.log" Jan 21 11:19:24 crc kubenswrapper[4925]: I0121 11:19:24.847297 4925 generic.go:334] "Generic (PLEG): container finished" podID="09c16233-6644-4a24-96d8-69d72c8c921d" containerID="7299e6e690be0b64c48b8220ff680a3eddeb29b3f6ed78a0ff31f197ef77e368" exitCode=2 Jan 21 11:19:24 crc kubenswrapper[4925]: I0121 11:19:24.848580 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-ddcb56777-sqrzz" event={"ID":"09c16233-6644-4a24-96d8-69d72c8c921d","Type":"ContainerDied","Data":"7299e6e690be0b64c48b8220ff680a3eddeb29b3f6ed78a0ff31f197ef77e368"} Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.663209 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-ddcb56777-sqrzz_09c16233-6644-4a24-96d8-69d72c8c921d/console/0.log" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.663302 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.775003 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkqsw\" (UniqueName: \"kubernetes.io/projected/09c16233-6644-4a24-96d8-69d72c8c921d-kube-api-access-pkqsw\") pod \"09c16233-6644-4a24-96d8-69d72c8c921d\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.775061 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-serving-cert\") pod \"09c16233-6644-4a24-96d8-69d72c8c921d\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.775167 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-oauth-config\") pod \"09c16233-6644-4a24-96d8-69d72c8c921d\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.775208 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-trusted-ca-bundle\") pod \"09c16233-6644-4a24-96d8-69d72c8c921d\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.775231 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-service-ca\") pod \"09c16233-6644-4a24-96d8-69d72c8c921d\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.775315 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-console-config\") pod \"09c16233-6644-4a24-96d8-69d72c8c921d\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.775341 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-oauth-serving-cert\") pod \"09c16233-6644-4a24-96d8-69d72c8c921d\" (UID: \"09c16233-6644-4a24-96d8-69d72c8c921d\") " Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.776597 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-service-ca" (OuterVolumeSpecName: "service-ca") pod "09c16233-6644-4a24-96d8-69d72c8c921d" (UID: "09c16233-6644-4a24-96d8-69d72c8c921d"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.776637 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "09c16233-6644-4a24-96d8-69d72c8c921d" (UID: "09c16233-6644-4a24-96d8-69d72c8c921d"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.776793 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09c16233-6644-4a24-96d8-69d72c8c921d" (UID: "09c16233-6644-4a24-96d8-69d72c8c921d"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.777552 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-console-config" (OuterVolumeSpecName: "console-config") pod "09c16233-6644-4a24-96d8-69d72c8c921d" (UID: "09c16233-6644-4a24-96d8-69d72c8c921d"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.780499 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "09c16233-6644-4a24-96d8-69d72c8c921d" (UID: "09c16233-6644-4a24-96d8-69d72c8c921d"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.780891 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09c16233-6644-4a24-96d8-69d72c8c921d-kube-api-access-pkqsw" (OuterVolumeSpecName: "kube-api-access-pkqsw") pod "09c16233-6644-4a24-96d8-69d72c8c921d" (UID: "09c16233-6644-4a24-96d8-69d72c8c921d"). InnerVolumeSpecName "kube-api-access-pkqsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.781037 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "09c16233-6644-4a24-96d8-69d72c8c921d" (UID: "09c16233-6644-4a24-96d8-69d72c8c921d"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.859698 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-ddcb56777-sqrzz_09c16233-6644-4a24-96d8-69d72c8c921d/console/0.log" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.859795 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-ddcb56777-sqrzz" event={"ID":"09c16233-6644-4a24-96d8-69d72c8c921d","Type":"ContainerDied","Data":"a5b1a3fb9aa961f3ed9e7e1981adf07a1d724d6c9f7cd1f2df85c7413edffd56"} Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.859832 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-ddcb56777-sqrzz" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.859866 4925 scope.go:117] "RemoveContainer" containerID="7299e6e690be0b64c48b8220ff680a3eddeb29b3f6ed78a0ff31f197ef77e368" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.866487 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerStarted","Data":"d2e8be3e9fa25301a547b791f4e58884a21913b64eee2dfd1aba3d4b982a27b7"} Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.881559 4925 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.881613 4925 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.881633 4925 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.881646 4925 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-console-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.881659 4925 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09c16233-6644-4a24-96d8-69d72c8c921d-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.881672 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkqsw\" (UniqueName: \"kubernetes.io/projected/09c16233-6644-4a24-96d8-69d72c8c921d-kube-api-access-pkqsw\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.881688 4925 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09c16233-6644-4a24-96d8-69d72c8c921d-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.896230 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/prometheus-metric-storage-0" podStartSLOduration=5.705748114 podStartE2EDuration="1m1.89620793s" podCreationTimestamp="2026-01-21 11:18:24 +0000 UTC" firstStartedPulling="2026-01-21 11:18:29.135557909 +0000 UTC m=+1400.739449843" lastFinishedPulling="2026-01-21 11:19:25.326017725 +0000 UTC m=+1456.929909659" observedRunningTime="2026-01-21 11:19:25.893845895 +0000 UTC m=+1457.497737849" watchObservedRunningTime="2026-01-21 11:19:25.89620793 +0000 UTC m=+1457.500099854" Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.918897 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-ddcb56777-sqrzz"] Jan 21 11:19:25 crc kubenswrapper[4925]: I0121 11:19:25.929595 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-ddcb56777-sqrzz"] Jan 21 11:19:26 crc kubenswrapper[4925]: I0121 11:19:26.416289 4925 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:26 crc kubenswrapper[4925]: I0121 11:19:26.416367 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:26 crc kubenswrapper[4925]: I0121 11:19:26.419601 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:26 crc kubenswrapper[4925]: I0121 11:19:26.888976 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:27 crc kubenswrapper[4925]: I0121 11:19:27.514323 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09c16233-6644-4a24-96d8-69d72c8c921d" path="/var/lib/kubelet/pods/09c16233-6644-4a24-96d8-69d72c8c921d/volumes" Jan 21 11:19:28 crc kubenswrapper[4925]: I0121 11:19:28.947955 4925 generic.go:334] "Generic (PLEG): container finished" podID="4c494924-513c-4575-a9c9-78e15c3751bc" containerID="634f412a8e6c75620fd65280aacb4713c3cb453673d77dfe98e6e42f4cd7bcab" exitCode=0 Jan 21 11:19:28 crc kubenswrapper[4925]: I0121 11:19:28.948057 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" event={"ID":"4c494924-513c-4575-a9c9-78e15c3751bc","Type":"ContainerDied","Data":"634f412a8e6c75620fd65280aacb4713c3cb453673d77dfe98e6e42f4cd7bcab"} Jan 21 11:19:28 crc kubenswrapper[4925]: I0121 11:19:28.958722 4925 generic.go:334] "Generic (PLEG): container finished" podID="b7c93089-4b7c-45c7-aa48-64622e536032" containerID="a6903b72a43df88702f90a449adaa14a7c955fe7feae8acfe98df4e7f68a72f3" exitCode=0 Jan 21 11:19:28 crc kubenswrapper[4925]: I0121 11:19:28.958800 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-server-0" event={"ID":"b7c93089-4b7c-45c7-aa48-64622e536032","Type":"ContainerDied","Data":"a6903b72a43df88702f90a449adaa14a7c955fe7feae8acfe98df4e7f68a72f3"} Jan 21 11:19:29 crc kubenswrapper[4925]: I0121 11:19:29.581251 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Jan 21 11:19:29 crc kubenswrapper[4925]: I0121 11:19:29.966898 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/prometheus-metric-storage-0" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="prometheus" containerID="cri-o://65939efbbe82f15f32b4fe10b8a30abd205cff5b0bef6c72b4a1df0380ea6ef1" gracePeriod=600 Jan 21 11:19:29 crc kubenswrapper[4925]: I0121 11:19:29.966996 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/prometheus-metric-storage-0" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="thanos-sidecar" containerID="cri-o://d2e8be3e9fa25301a547b791f4e58884a21913b64eee2dfd1aba3d4b982a27b7" gracePeriod=600 Jan 21 11:19:29 crc kubenswrapper[4925]: I0121 11:19:29.967020 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/prometheus-metric-storage-0" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="config-reloader" containerID="cri-o://54b32f4d672670129b579755517b120cd949c7c22b12ebd6fa72c2a609cebcd2" gracePeriod=600 Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.980589 4925 generic.go:334] "Generic (PLEG): container finished" podID="5d57ee6d-3979-4874-9325-3922afacba25" 
containerID="d2e8be3e9fa25301a547b791f4e58884a21913b64eee2dfd1aba3d4b982a27b7" exitCode=0 Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.981957 4925 generic.go:334] "Generic (PLEG): container finished" podID="5d57ee6d-3979-4874-9325-3922afacba25" containerID="54b32f4d672670129b579755517b120cd949c7c22b12ebd6fa72c2a609cebcd2" exitCode=0 Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.982073 4925 generic.go:334] "Generic (PLEG): container finished" podID="5d57ee6d-3979-4874-9325-3922afacba25" containerID="65939efbbe82f15f32b4fe10b8a30abd205cff5b0bef6c72b4a1df0380ea6ef1" exitCode=0 Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.980618 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerDied","Data":"d2e8be3e9fa25301a547b791f4e58884a21913b64eee2dfd1aba3d4b982a27b7"} Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.982255 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerDied","Data":"54b32f4d672670129b579755517b120cd949c7c22b12ebd6fa72c2a609cebcd2"} Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.982309 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerDied","Data":"65939efbbe82f15f32b4fe10b8a30abd205cff5b0bef6c72b4a1df0380ea6ef1"} Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.987206 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" event={"ID":"4c494924-513c-4575-a9c9-78e15c3751bc","Type":"ContainerStarted","Data":"fa42b2bb87bcc40e911af1801467ece82dd958cb9281629a2625b2d9709522d7"} Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.987482 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:19:30 crc kubenswrapper[4925]: I0121 11:19:30.998234 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/rabbitmq-server-0" event={"ID":"b7c93089-4b7c-45c7-aa48-64622e536032","Type":"ContainerStarted","Data":"2722e553edc10e804ed34a44d931eea3f7ad9db02fab194336fc6b2959ffcf4f"} Jan 21 11:19:31 crc kubenswrapper[4925]: I0121 11:19:31.022740 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" podStartSLOduration=38.836484447 podStartE2EDuration="1m11.022702082s" podCreationTimestamp="2026-01-21 11:18:20 +0000 UTC" firstStartedPulling="2026-01-21 11:18:22.081681166 +0000 UTC m=+1393.685573100" lastFinishedPulling="2026-01-21 11:18:54.267898801 +0000 UTC m=+1425.871790735" observedRunningTime="2026-01-21 11:19:31.016759935 +0000 UTC m=+1462.620651879" watchObservedRunningTime="2026-01-21 11:19:31.022702082 +0000 UTC m=+1462.626594016" Jan 21 11:19:31 crc kubenswrapper[4925]: I0121 11:19:31.416559 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/prometheus-metric-storage-0" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.110:9090/-/ready\": dial tcp 10.217.0.110:9090: connect: connection refused" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.008752 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="watcher-kuttl-default/rabbitmq-server-0" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.062622 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/rabbitmq-server-0" podStartSLOduration=40.712608627 podStartE2EDuration="1m13.062586181s" podCreationTimestamp="2026-01-21 11:18:19 +0000 UTC" firstStartedPulling="2026-01-21 11:18:21.913956703 +0000 UTC m=+1393.517848637" lastFinishedPulling="2026-01-21 11:18:54.263934257 +0000 UTC m=+1425.867826191" observedRunningTime="2026-01-21 11:19:32.056031965 +0000 UTC m=+1463.659923919" watchObservedRunningTime="2026-01-21 11:19:32.062586181 +0000 UTC m=+1463.666478115" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.273355 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.436603 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-0\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.436699 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-thanos-prometheus-http-client-file\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437034 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437067 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-tls-assets\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437110 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-1\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437133 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-2\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437222 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-config\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 
11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437257 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7wfp\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-kube-api-access-j7wfp\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437287 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-web-config\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437354 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5d57ee6d-3979-4874-9325-3922afacba25-config-out\") pod \"5d57ee6d-3979-4874-9325-3922afacba25\" (UID: \"5d57ee6d-3979-4874-9325-3922afacba25\") " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437802 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.437988 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.438073 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.451966 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-kube-api-access-j7wfp" (OuterVolumeSpecName: "kube-api-access-j7wfp") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "kube-api-access-j7wfp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.458650 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "tls-assets". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.458674 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d57ee6d-3979-4874-9325-3922afacba25-config-out" (OuterVolumeSpecName: "config-out") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.459686 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.479801 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-config" (OuterVolumeSpecName: "config") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.485822 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-web-config" (OuterVolumeSpecName: "web-config") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.490482 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "5d57ee6d-3979-4874-9325-3922afacba25" (UID: "5d57ee6d-3979-4874-9325-3922afacba25"). InnerVolumeSpecName "pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.539242 4925 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.539708 4925 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.540033 4925 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") on node \"crc\" " Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.540203 4925 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.540247 4925 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.540271 4925 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5d57ee6d-3979-4874-9325-3922afacba25-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.540293 4925 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.540315 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7wfp\" (UniqueName: \"kubernetes.io/projected/5d57ee6d-3979-4874-9325-3922afacba25-kube-api-access-j7wfp\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.540330 4925 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5d57ee6d-3979-4874-9325-3922afacba25-web-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.540343 4925 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5d57ee6d-3979-4874-9325-3922afacba25-config-out\") on node \"crc\" DevicePath \"\"" Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.577080 4925 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.577747 4925 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9") on node "crc"
Jan 21 11:19:32 crc kubenswrapper[4925]: I0121 11:19:32.641910 4925 reconciler_common.go:293] "Volume detached for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") on node \"crc\" DevicePath \"\""
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.021967 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"5d57ee6d-3979-4874-9325-3922afacba25","Type":"ContainerDied","Data":"6ac6c77d378297990baad20ac16585f2d10620978882efa50e0329ea830da9cd"}
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.022637 4925 scope.go:117] "RemoveContainer" containerID="d2e8be3e9fa25301a547b791f4e58884a21913b64eee2dfd1aba3d4b982a27b7"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.023098 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.053650 4925 scope.go:117] "RemoveContainer" containerID="54b32f4d672670129b579755517b120cd949c7c22b12ebd6fa72c2a609cebcd2"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.075547 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"]
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.091787 4925 scope.go:117] "RemoveContainer" containerID="65939efbbe82f15f32b4fe10b8a30abd205cff5b0bef6c72b4a1df0380ea6ef1"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.092761 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"]
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.116035 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"]
Jan 21 11:19:33 crc kubenswrapper[4925]: E0121 11:19:33.116840 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23a3b777-9ddf-4df2-842a-b9e29e1b7aa0" containerName="mariadb-account-create-update"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.116937 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="23a3b777-9ddf-4df2-842a-b9e29e1b7aa0" containerName="mariadb-account-create-update"
Jan 21 11:19:33 crc kubenswrapper[4925]: E0121 11:19:33.117061 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="thanos-sidecar"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.117124 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="thanos-sidecar"
Jan 21 11:19:33 crc kubenswrapper[4925]: E0121 11:19:33.117199 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6426427a-7aeb-4bf8-8850-8dd1fbf82adc" containerName="mariadb-account-create-update"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.117259 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6426427a-7aeb-4bf8-8850-8dd1fbf82adc" containerName="mariadb-account-create-update"
Jan 21 11:19:33 crc kubenswrapper[4925]: E0121 11:19:33.117347 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46a8fc8b-5efe-47ee-800e-fd7372a3bc4b" containerName="mariadb-database-create"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.117478 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="46a8fc8b-5efe-47ee-800e-fd7372a3bc4b" containerName="mariadb-database-create"
Jan 21 11:19:33 crc kubenswrapper[4925]: E0121 11:19:33.117588 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09c16233-6644-4a24-96d8-69d72c8c921d" containerName="console"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.117669 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="09c16233-6644-4a24-96d8-69d72c8c921d" containerName="console"
Jan 21 11:19:33 crc kubenswrapper[4925]: E0121 11:19:33.117771 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="init-config-reloader"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.117842 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="init-config-reloader"
Jan 21 11:19:33 crc kubenswrapper[4925]: E0121 11:19:33.117998 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="prometheus"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118094 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="prometheus"
Jan 21 11:19:33 crc kubenswrapper[4925]: E0121 11:19:33.118163 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="config-reloader"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118221 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="config-reloader"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118545 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="46a8fc8b-5efe-47ee-800e-fd7372a3bc4b" containerName="mariadb-database-create"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118650 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="6426427a-7aeb-4bf8-8850-8dd1fbf82adc" containerName="mariadb-account-create-update"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118734 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="config-reloader"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118796 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="23a3b777-9ddf-4df2-842a-b9e29e1b7aa0" containerName="mariadb-account-create-update"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118861 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="prometheus"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118922 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="09c16233-6644-4a24-96d8-69d72c8c921d" containerName="console"
Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.118994 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d57ee6d-3979-4874-9325-3922afacba25" containerName="thanos-sidecar"
Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.126931 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-metric-storage-prometheus-svc" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.127184 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.127351 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"prometheus-metric-storage-rulefiles-1" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.127580 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"metric-storage-prometheus-dockercfg-6kxpc" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.127750 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-web-config" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.127899 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.128509 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"prometheus-metric-storage-rulefiles-2" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.128686 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"prometheus-metric-storage-rulefiles-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.156970 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.159998 4925 scope.go:117] "RemoveContainer" containerID="14e0b16b9dc2cac9eb816364b417409e163edf2189e5dd4e81ada2ffc9f39379" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.164927 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"prometheus-metric-storage-tls-assets-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.251789 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.251859 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.251901 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-config\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " 
pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.251940 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.251984 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.252007 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.252038 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.252064 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65hhc\" (UniqueName: \"kubernetes.io/projected/74f59733-5086-4ebd-9e6a-764a947d38b4-kube-api-access-65hhc\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.252106 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/74f59733-5086-4ebd-9e6a-764a947d38b4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.252137 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/74f59733-5086-4ebd-9e6a-764a947d38b4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.252192 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 
11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.252217 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.252496 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354166 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354286 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354329 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354354 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-config\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354413 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354456 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " 
pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354482 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354511 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354542 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65hhc\" (UniqueName: \"kubernetes.io/projected/74f59733-5086-4ebd-9e6a-764a947d38b4-kube-api-access-65hhc\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354590 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/74f59733-5086-4ebd-9e6a-764a947d38b4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354619 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/74f59733-5086-4ebd-9e6a-764a947d38b4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354680 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.354728 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.356784 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.360410 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.362570 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.363316 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/74f59733-5086-4ebd-9e6a-764a947d38b4-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.364360 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.367157 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.367991 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.368026 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-config\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.368484 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/74f59733-5086-4ebd-9e6a-764a947d38b4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.368638 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: 
\"kubernetes.io/empty-dir/74f59733-5086-4ebd-9e6a-764a947d38b4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.370188 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74f59733-5086-4ebd-9e6a-764a947d38b4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.374909 4925 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.375137 4925 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/96a0132b9b32581b5fb9ddbb16c4fadae4ddfa3fdc0501538288252f0717dbd1/globalmount\"" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.383733 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65hhc\" (UniqueName: \"kubernetes.io/projected/74f59733-5086-4ebd-9e6a-764a947d38b4-kube-api-access-65hhc\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.435077 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-013b0fa6-fcff-4603-b0e2-8b05b8f2c9f9\") pod \"prometheus-metric-storage-0\" (UID: \"74f59733-5086-4ebd-9e6a-764a947d38b4\") " pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.455811 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:19:33 crc kubenswrapper[4925]: I0121 11:19:33.516483 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d57ee6d-3979-4874-9325-3922afacba25" path="/var/lib/kubelet/pods/5d57ee6d-3979-4874-9325-3922afacba25/volumes" Jan 21 11:19:34 crc kubenswrapper[4925]: I0121 11:19:34.106257 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/prometheus-metric-storage-0"] Jan 21 11:19:34 crc kubenswrapper[4925]: W0121 11:19:34.108934 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74f59733_5086_4ebd_9e6a_764a947d38b4.slice/crio-48817b3fb18ee7ea4db12472d6455f830b866061e4c3e882ebaf40654fb27693 WatchSource:0}: Error finding container 48817b3fb18ee7ea4db12472d6455f830b866061e4c3e882ebaf40654fb27693: Status 404 returned error can't find the container with id 48817b3fb18ee7ea4db12472d6455f830b866061e4c3e882ebaf40654fb27693 Jan 21 11:19:35 crc kubenswrapper[4925]: I0121 11:19:35.087346 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"74f59733-5086-4ebd-9e6a-764a947d38b4","Type":"ContainerStarted","Data":"48817b3fb18ee7ea4db12472d6455f830b866061e4c3e882ebaf40654fb27693"} Jan 21 11:19:38 crc kubenswrapper[4925]: I0121 11:19:38.257215 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"74f59733-5086-4ebd-9e6a-764a947d38b4","Type":"ContainerStarted","Data":"838662a0b71e9cf7b2d23aee32f4ae25f5f71af225ae6c54ed540a10ca6e58b1"} Jan 21 11:19:41 crc kubenswrapper[4925]: I0121 11:19:41.303032 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/rabbitmq-server-0" podUID="b7c93089-4b7c-45c7-aa48-64622e536032" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Jan 21 11:19:41 crc kubenswrapper[4925]: I0121 11:19:41.588687 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/rabbitmq-notifications-server-0" Jan 21 11:19:46 crc kubenswrapper[4925]: I0121 11:19:46.339310 4925 generic.go:334] "Generic (PLEG): container finished" podID="74f59733-5086-4ebd-9e6a-764a947d38b4" containerID="838662a0b71e9cf7b2d23aee32f4ae25f5f71af225ae6c54ed540a10ca6e58b1" exitCode=0 Jan 21 11:19:46 crc kubenswrapper[4925]: I0121 11:19:46.339471 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"74f59733-5086-4ebd-9e6a-764a947d38b4","Type":"ContainerDied","Data":"838662a0b71e9cf7b2d23aee32f4ae25f5f71af225ae6c54ed540a10ca6e58b1"} Jan 21 11:19:47 crc kubenswrapper[4925]: I0121 11:19:47.349913 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"74f59733-5086-4ebd-9e6a-764a947d38b4","Type":"ContainerStarted","Data":"8b422ed87c5942a4c4281c8a77b2865be440b6ac228504bf07deb45563dbac72"} Jan 21 11:19:49 crc kubenswrapper[4925]: I0121 11:19:49.369095 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"74f59733-5086-4ebd-9e6a-764a947d38b4","Type":"ContainerStarted","Data":"4b1608db1c6505746c853b88d4693c258c809422eb3d22d24a8842d16cffb429"} Jan 21 11:19:49 crc kubenswrapper[4925]: I0121 11:19:49.941418 4925 patch_prober.go:28] interesting 
Jan 21 11:19:49 crc kubenswrapper[4925]: I0121 11:19:49.941418 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 11:19:49 crc kubenswrapper[4925]: I0121 11:19:49.941849 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 11:19:50 crc kubenswrapper[4925]: I0121 11:19:50.379919 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/prometheus-metric-storage-0" event={"ID":"74f59733-5086-4ebd-9e6a-764a947d38b4","Type":"ContainerStarted","Data":"fd23373701ccfdbbca85ac7b862044ad22d179fdc8da6fe5b90c3fb4f892f25f"}
Jan 21 11:19:50 crc kubenswrapper[4925]: I0121 11:19:50.415765 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/prometheus-metric-storage-0" podStartSLOduration=17.415736821 podStartE2EDuration="17.415736821s" podCreationTimestamp="2026-01-21 11:19:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:19:50.40937551 +0000 UTC m=+1482.013267454" watchObservedRunningTime="2026-01-21 11:19:50.415736821 +0000 UTC m=+1482.019628765"
Jan 21 11:19:51 crc kubenswrapper[4925]: I0121 11:19:51.303313 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/rabbitmq-server-0"
Jan 21 11:19:51 crc kubenswrapper[4925]: I0121 11:19:51.997505 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-db-sync-jnj2q"]
Need to start a new one" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.013372 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-keystone-dockercfg-g65fq" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.013699 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.014034 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-scripts" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.017278 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-db-sync-jnj2q"] Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.019526 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-config-data" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.085836 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2knh\" (UniqueName: \"kubernetes.io/projected/82ae5abc-2167-42bc-9613-27710d083439-kube-api-access-j2knh\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.085923 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-config-data\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.086326 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-combined-ca-bundle\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.188160 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-config-data\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.188342 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-combined-ca-bundle\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.188451 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2knh\" (UniqueName: \"kubernetes.io/projected/82ae5abc-2167-42bc-9613-27710d083439-kube-api-access-j2knh\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.196499 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-config-data\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.196825 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-combined-ca-bundle\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.242993 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2knh\" (UniqueName: \"kubernetes.io/projected/82ae5abc-2167-42bc-9613-27710d083439-kube-api-access-j2knh\") pod \"keystone-db-sync-jnj2q\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:52 crc kubenswrapper[4925]: I0121 11:19:52.325999 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:19:53 crc kubenswrapper[4925]: I0121 11:19:53.347293 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-db-sync-jnj2q"] Jan 21 11:19:53 crc kubenswrapper[4925]: I0121 11:19:53.407880 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" event={"ID":"82ae5abc-2167-42bc-9613-27710d083439","Type":"ContainerStarted","Data":"33d00833a9c680062fd8f52c22bf5eac9bcf0aead84356c1c488aa6ad7ca0edd"} Jan 21 11:19:53 crc kubenswrapper[4925]: I0121 11:19:53.456838 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:20:03 crc kubenswrapper[4925]: I0121 11:20:03.457714 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:20:03 crc kubenswrapper[4925]: I0121 11:20:03.467671 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:20:03 crc kubenswrapper[4925]: I0121 11:20:03.846065 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/prometheus-metric-storage-0" Jan 21 11:20:04 crc kubenswrapper[4925]: I0121 11:20:04.893152 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" event={"ID":"82ae5abc-2167-42bc-9613-27710d083439","Type":"ContainerStarted","Data":"6a1cd23d9ec366c434b4f0b8ee68b1607b1bce8c619010bad469a4345b6bb361"} Jan 21 11:20:04 crc kubenswrapper[4925]: I0121 11:20:04.915590 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" podStartSLOduration=3.314041252 podStartE2EDuration="13.915564987s" podCreationTimestamp="2026-01-21 11:19:51 +0000 UTC" firstStartedPulling="2026-01-21 11:19:53.353238789 +0000 UTC m=+1484.957130723" lastFinishedPulling="2026-01-21 11:20:03.954762524 +0000 UTC m=+1495.558654458" observedRunningTime="2026-01-21 11:20:04.912830301 +0000 UTC m=+1496.516722235" watchObservedRunningTime="2026-01-21 11:20:04.915564987 +0000 UTC m=+1496.519456921" Jan 21 11:20:08 crc kubenswrapper[4925]: I0121 11:20:08.028659 4925 generic.go:334] "Generic (PLEG): container finished" 
podID="82ae5abc-2167-42bc-9613-27710d083439" containerID="6a1cd23d9ec366c434b4f0b8ee68b1607b1bce8c619010bad469a4345b6bb361" exitCode=0 Jan 21 11:20:08 crc kubenswrapper[4925]: I0121 11:20:08.028724 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" event={"ID":"82ae5abc-2167-42bc-9613-27710d083439","Type":"ContainerDied","Data":"6a1cd23d9ec366c434b4f0b8ee68b1607b1bce8c619010bad469a4345b6bb361"} Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.366224 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.491655 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2knh\" (UniqueName: \"kubernetes.io/projected/82ae5abc-2167-42bc-9613-27710d083439-kube-api-access-j2knh\") pod \"82ae5abc-2167-42bc-9613-27710d083439\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.491742 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-config-data\") pod \"82ae5abc-2167-42bc-9613-27710d083439\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.491991 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-combined-ca-bundle\") pod \"82ae5abc-2167-42bc-9613-27710d083439\" (UID: \"82ae5abc-2167-42bc-9613-27710d083439\") " Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.500758 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82ae5abc-2167-42bc-9613-27710d083439-kube-api-access-j2knh" (OuterVolumeSpecName: "kube-api-access-j2knh") pod "82ae5abc-2167-42bc-9613-27710d083439" (UID: "82ae5abc-2167-42bc-9613-27710d083439"). InnerVolumeSpecName "kube-api-access-j2knh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.522908 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82ae5abc-2167-42bc-9613-27710d083439" (UID: "82ae5abc-2167-42bc-9613-27710d083439"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.551699 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-config-data" (OuterVolumeSpecName: "config-data") pod "82ae5abc-2167-42bc-9613-27710d083439" (UID: "82ae5abc-2167-42bc-9613-27710d083439"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.593729 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.593770 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2knh\" (UniqueName: \"kubernetes.io/projected/82ae5abc-2167-42bc-9613-27710d083439-kube-api-access-j2knh\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:09 crc kubenswrapper[4925]: I0121 11:20:09.593782 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82ae5abc-2167-42bc-9613-27710d083439-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.049793 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" event={"ID":"82ae5abc-2167-42bc-9613-27710d083439","Type":"ContainerDied","Data":"33d00833a9c680062fd8f52c22bf5eac9bcf0aead84356c1c488aa6ad7ca0edd"} Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.049850 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33d00833a9c680062fd8f52c22bf5eac9bcf0aead84356c1c488aa6ad7ca0edd" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.049894 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-db-sync-jnj2q" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.269568 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-9xnnt"] Jan 21 11:20:10 crc kubenswrapper[4925]: E0121 11:20:10.270252 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ae5abc-2167-42bc-9613-27710d083439" containerName="keystone-db-sync" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.270358 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ae5abc-2167-42bc-9613-27710d083439" containerName="keystone-db-sync" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.270691 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="82ae5abc-2167-42bc-9613-27710d083439" containerName="keystone-db-sync" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.271432 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.274743 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-scripts" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.275221 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.275232 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-keystone-dockercfg-g65fq" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.275819 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-config-data" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.277067 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"osp-secret" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.294060 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-9xnnt"] Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.408775 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-fernet-keys\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.409774 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-config-data\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.410207 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-credential-keys\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.410364 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slnf7\" (UniqueName: \"kubernetes.io/projected/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-kube-api-access-slnf7\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.410462 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-combined-ca-bundle\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.410489 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-scripts\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " 
pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.451522 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.454215 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.464991 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.467164 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.501418 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511493 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-scripts\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511566 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-credential-keys\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511598 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s26x6\" (UniqueName: \"kubernetes.io/projected/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-kube-api-access-s26x6\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511647 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-config-data\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511692 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slnf7\" (UniqueName: \"kubernetes.io/projected/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-kube-api-access-slnf7\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511743 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-combined-ca-bundle\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511767 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-scripts\") pod \"keystone-bootstrap-9xnnt\" (UID: 
\"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511801 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511831 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-fernet-keys\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511874 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511912 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-run-httpd\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511947 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-log-httpd\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.511977 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-config-data\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.523658 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-fernet-keys\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.527612 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-config-data\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.531907 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-scripts\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc 
kubenswrapper[4925]: I0121 11:20:10.534263 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-combined-ca-bundle\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.561049 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-credential-keys\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.568243 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slnf7\" (UniqueName: \"kubernetes.io/projected/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-kube-api-access-slnf7\") pod \"keystone-bootstrap-9xnnt\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.602859 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.614653 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-scripts\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.615068 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s26x6\" (UniqueName: \"kubernetes.io/projected/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-kube-api-access-s26x6\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.615131 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-config-data\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.615188 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.615233 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.615255 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-run-httpd\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 
11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.615292 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-log-httpd\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.615936 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-log-httpd\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.626520 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-scripts\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.628359 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-run-httpd\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.639650 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.640507 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.655726 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-config-data\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.692407 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s26x6\" (UniqueName: \"kubernetes.io/projected/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-kube-api-access-s26x6\") pod \"ceilometer-0\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:10 crc kubenswrapper[4925]: I0121 11:20:10.793132 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:11 crc kubenswrapper[4925]: I0121 11:20:11.415580 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-9xnnt"] Jan 21 11:20:11 crc kubenswrapper[4925]: I0121 11:20:11.592972 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:11 crc kubenswrapper[4925]: W0121 11:20:11.600152 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd6b3950_f1a0_47e8_9c96_3e632e8ca687.slice/crio-45b2d1c90be79c4b6db4ad2bd204b42cb75dd300d26c4f2e2f583c01c6345a49 WatchSource:0}: Error finding container 45b2d1c90be79c4b6db4ad2bd204b42cb75dd300d26c4f2e2f583c01c6345a49: Status 404 returned error can't find the container with id 45b2d1c90be79c4b6db4ad2bd204b42cb75dd300d26c4f2e2f583c01c6345a49 Jan 21 11:20:12 crc kubenswrapper[4925]: I0121 11:20:12.081766 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" event={"ID":"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f","Type":"ContainerStarted","Data":"e890de960a13e545a06f122979112b8582565c7231f881918fc64c0688f6d862"} Jan 21 11:20:12 crc kubenswrapper[4925]: I0121 11:20:12.082292 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" event={"ID":"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f","Type":"ContainerStarted","Data":"a4dbc7c00482b69840b2c0b1558018d803978c8163789552fa96441774253a8f"} Jan 21 11:20:12 crc kubenswrapper[4925]: I0121 11:20:12.084973 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerStarted","Data":"45b2d1c90be79c4b6db4ad2bd204b42cb75dd300d26c4f2e2f583c01c6345a49"} Jan 21 11:20:12 crc kubenswrapper[4925]: I0121 11:20:12.103387 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" podStartSLOduration=2.103358355 podStartE2EDuration="2.103358355s" podCreationTimestamp="2026-01-21 11:20:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:20:12.099751151 +0000 UTC m=+1503.703643095" watchObservedRunningTime="2026-01-21 11:20:12.103358355 +0000 UTC m=+1503.707250289" Jan 21 11:20:13 crc kubenswrapper[4925]: I0121 11:20:13.229235 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:18 crc kubenswrapper[4925]: I0121 11:20:18.154750 4925 generic.go:334] "Generic (PLEG): container finished" podID="ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" containerID="e890de960a13e545a06f122979112b8582565c7231f881918fc64c0688f6d862" exitCode=0 Jan 21 11:20:18 crc kubenswrapper[4925]: I0121 11:20:18.154852 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" event={"ID":"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f","Type":"ContainerDied","Data":"e890de960a13e545a06f122979112b8582565c7231f881918fc64c0688f6d862"} Jan 21 11:20:18 crc kubenswrapper[4925]: I0121 11:20:18.164786 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerStarted","Data":"e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e"} Jan 21 11:20:19 crc 
kubenswrapper[4925]: I0121 11:20:19.543420 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.648577 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-config-data\") pod \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.648647 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-fernet-keys\") pod \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.648738 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-credential-keys\") pod \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.648764 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slnf7\" (UniqueName: \"kubernetes.io/projected/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-kube-api-access-slnf7\") pod \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.648817 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-scripts\") pod \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.648912 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-combined-ca-bundle\") pod \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\" (UID: \"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f\") " Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.656410 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-scripts" (OuterVolumeSpecName: "scripts") pod "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" (UID: "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.656898 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" (UID: "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.656957 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" (UID: "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.664233 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-kube-api-access-slnf7" (OuterVolumeSpecName: "kube-api-access-slnf7") pod "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" (UID: "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f"). InnerVolumeSpecName "kube-api-access-slnf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.675416 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-config-data" (OuterVolumeSpecName: "config-data") pod "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" (UID: "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.678442 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" (UID: "ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.752453 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.752520 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.752595 4925 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.752607 4925 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.752619 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slnf7\" (UniqueName: \"kubernetes.io/projected/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-kube-api-access-slnf7\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:19 crc kubenswrapper[4925]: I0121 11:20:19.752632 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.098360 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.098522 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" 
podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.184608 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" event={"ID":"ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f","Type":"ContainerDied","Data":"a4dbc7c00482b69840b2c0b1558018d803978c8163789552fa96441774253a8f"} Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.184657 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-9xnnt" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.184664 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4dbc7c00482b69840b2c0b1558018d803978c8163789552fa96441774253a8f" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.284458 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-9xnnt"] Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.292240 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-9xnnt"] Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.375224 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-mvrqb"] Jan 21 11:20:20 crc kubenswrapper[4925]: E0121 11:20:20.375833 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" containerName="keystone-bootstrap" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.375860 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" containerName="keystone-bootstrap" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.376262 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" containerName="keystone-bootstrap" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.377173 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.385173 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-scripts" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.385305 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.385773 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-config-data" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.386691 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-keystone-dockercfg-g65fq" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.386787 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"osp-secret" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.392851 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-mvrqb"] Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.518160 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-config-data\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.528433 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-scripts\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.528891 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-combined-ca-bundle\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.529072 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j59pl\" (UniqueName: \"kubernetes.io/projected/61131ba1-5e24-4728-bd8d-adb5a0c63136-kube-api-access-j59pl\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.529250 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-fernet-keys\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.529387 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-credential-keys\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " 
pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.630937 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-scripts\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.631016 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-combined-ca-bundle\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.631052 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j59pl\" (UniqueName: \"kubernetes.io/projected/61131ba1-5e24-4728-bd8d-adb5a0c63136-kube-api-access-j59pl\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.631074 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-fernet-keys\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.631098 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-credential-keys\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.631150 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-config-data\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.636793 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-scripts\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.636849 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-credential-keys\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.637116 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-combined-ca-bundle\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 
crc kubenswrapper[4925]: I0121 11:20:20.641994 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-fernet-keys\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.644125 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-config-data\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.651524 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j59pl\" (UniqueName: \"kubernetes.io/projected/61131ba1-5e24-4728-bd8d-adb5a0c63136-kube-api-access-j59pl\") pod \"keystone-bootstrap-mvrqb\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:20 crc kubenswrapper[4925]: I0121 11:20:20.699885 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:21 crc kubenswrapper[4925]: I0121 11:20:21.236270 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-mvrqb"] Jan 21 11:20:21 crc kubenswrapper[4925]: I0121 11:20:21.522934 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f" path="/var/lib/kubelet/pods/ae926bfd-b7a8-42a4-b3a7-f8a4b8ae8b1f/volumes" Jan 21 11:20:22 crc kubenswrapper[4925]: I0121 11:20:22.206693 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" event={"ID":"61131ba1-5e24-4728-bd8d-adb5a0c63136","Type":"ContainerStarted","Data":"267b62237c90a2ba240d18c1b9ea0bf395c21ac33971617e7dd4b95c609f6275"} Jan 21 11:20:22 crc kubenswrapper[4925]: I0121 11:20:22.742279 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 11:20:23 crc kubenswrapper[4925]: I0121 11:20:23.221732 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerStarted","Data":"a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38"} Jan 21 11:20:23 crc kubenswrapper[4925]: I0121 11:20:23.228015 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" event={"ID":"61131ba1-5e24-4728-bd8d-adb5a0c63136","Type":"ContainerStarted","Data":"86a16f3ff2d07884a507606696ce02015881f320482be8d52f82127545582bfa"} Jan 21 11:20:23 crc kubenswrapper[4925]: I0121 11:20:23.256302 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" podStartSLOduration=3.2562715300000002 podStartE2EDuration="3.25627153s" podCreationTimestamp="2026-01-21 11:20:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:20:23.253409729 +0000 UTC m=+1514.857301663" watchObservedRunningTime="2026-01-21 11:20:23.25627153 +0000 UTC m=+1514.860163464" Jan 21 11:20:28 crc kubenswrapper[4925]: I0121 11:20:28.286786 4925 generic.go:334] "Generic (PLEG): 
container finished" podID="61131ba1-5e24-4728-bd8d-adb5a0c63136" containerID="86a16f3ff2d07884a507606696ce02015881f320482be8d52f82127545582bfa" exitCode=0 Jan 21 11:20:28 crc kubenswrapper[4925]: I0121 11:20:28.286942 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" event={"ID":"61131ba1-5e24-4728-bd8d-adb5a0c63136","Type":"ContainerDied","Data":"86a16f3ff2d07884a507606696ce02015881f320482be8d52f82127545582bfa"} Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.262647 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.357229 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j59pl\" (UniqueName: \"kubernetes.io/projected/61131ba1-5e24-4728-bd8d-adb5a0c63136-kube-api-access-j59pl\") pod \"61131ba1-5e24-4728-bd8d-adb5a0c63136\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.357357 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-combined-ca-bundle\") pod \"61131ba1-5e24-4728-bd8d-adb5a0c63136\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.357410 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-scripts\") pod \"61131ba1-5e24-4728-bd8d-adb5a0c63136\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.357480 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-fernet-keys\") pod \"61131ba1-5e24-4728-bd8d-adb5a0c63136\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.357733 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-credential-keys\") pod \"61131ba1-5e24-4728-bd8d-adb5a0c63136\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.357807 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-config-data\") pod \"61131ba1-5e24-4728-bd8d-adb5a0c63136\" (UID: \"61131ba1-5e24-4728-bd8d-adb5a0c63136\") " Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.377911 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "61131ba1-5e24-4728-bd8d-adb5a0c63136" (UID: "61131ba1-5e24-4728-bd8d-adb5a0c63136"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.378017 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-scripts" (OuterVolumeSpecName: "scripts") pod "61131ba1-5e24-4728-bd8d-adb5a0c63136" (UID: "61131ba1-5e24-4728-bd8d-adb5a0c63136"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.378732 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61131ba1-5e24-4728-bd8d-adb5a0c63136-kube-api-access-j59pl" (OuterVolumeSpecName: "kube-api-access-j59pl") pod "61131ba1-5e24-4728-bd8d-adb5a0c63136" (UID: "61131ba1-5e24-4728-bd8d-adb5a0c63136"). InnerVolumeSpecName "kube-api-access-j59pl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.394735 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "61131ba1-5e24-4728-bd8d-adb5a0c63136" (UID: "61131ba1-5e24-4728-bd8d-adb5a0c63136"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.408850 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" event={"ID":"61131ba1-5e24-4728-bd8d-adb5a0c63136","Type":"ContainerDied","Data":"267b62237c90a2ba240d18c1b9ea0bf395c21ac33971617e7dd4b95c609f6275"} Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.408917 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="267b62237c90a2ba240d18c1b9ea0bf395c21ac33971617e7dd4b95c609f6275" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.408998 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-mvrqb" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.410265 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-config-data" (OuterVolumeSpecName: "config-data") pod "61131ba1-5e24-4728-bd8d-adb5a0c63136" (UID: "61131ba1-5e24-4728-bd8d-adb5a0c63136"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.421791 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61131ba1-5e24-4728-bd8d-adb5a0c63136" (UID: "61131ba1-5e24-4728-bd8d-adb5a0c63136"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.460233 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j59pl\" (UniqueName: \"kubernetes.io/projected/61131ba1-5e24-4728-bd8d-adb5a0c63136-kube-api-access-j59pl\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.460435 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.460574 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.460662 4925 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.460792 4925 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:30 crc kubenswrapper[4925]: I0121 11:20:30.460901 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61131ba1-5e24-4728-bd8d-adb5a0c63136-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.429996 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerStarted","Data":"db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327"} Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.591783 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-665b4c6f5-gzz77"] Jan 21 11:20:31 crc kubenswrapper[4925]: E0121 11:20:31.592528 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61131ba1-5e24-4728-bd8d-adb5a0c63136" containerName="keystone-bootstrap" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.592557 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="61131ba1-5e24-4728-bd8d-adb5a0c63136" containerName="keystone-bootstrap" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.592841 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="61131ba1-5e24-4728-bd8d-adb5a0c63136" containerName="keystone-bootstrap" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.593600 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.598463 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-keystone-internal-svc" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.598585 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.598481 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-scripts" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.598641 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-keystone-dockercfg-g65fq" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.599077 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"keystone-config-data" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.599162 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-keystone-public-svc" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.625483 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-665b4c6f5-gzz77"] Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.686014 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-credential-keys\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.686134 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-fernet-keys\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.686163 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-internal-tls-certs\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.686191 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-public-tls-certs\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.686230 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-scripts\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.686319 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-combined-ca-bundle\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.686366 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m5gg\" (UniqueName: \"kubernetes.io/projected/cac93b53-352f-4f28-a456-b80df0aa2670-kube-api-access-6m5gg\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.686433 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-config-data\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.788371 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-credential-keys\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.788600 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-fernet-keys\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.789609 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-internal-tls-certs\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.789650 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-public-tls-certs\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.789698 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-scripts\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.789789 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-combined-ca-bundle\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.789864 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-6m5gg\" (UniqueName: \"kubernetes.io/projected/cac93b53-352f-4f28-a456-b80df0aa2670-kube-api-access-6m5gg\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.789900 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-config-data\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.798064 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-internal-tls-certs\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.800156 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-credential-keys\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.800999 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-fernet-keys\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.805381 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-scripts\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.812304 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-public-tls-certs\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.816551 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m5gg\" (UniqueName: \"kubernetes.io/projected/cac93b53-352f-4f28-a456-b80df0aa2670-kube-api-access-6m5gg\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.820713 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-combined-ca-bundle\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.820922 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-config-data\") pod \"keystone-665b4c6f5-gzz77\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:31 crc kubenswrapper[4925]: I0121 11:20:31.923380 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:32 crc kubenswrapper[4925]: I0121 11:20:32.478936 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-665b4c6f5-gzz77"] Jan 21 11:20:33 crc kubenswrapper[4925]: I0121 11:20:33.462772 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" event={"ID":"cac93b53-352f-4f28-a456-b80df0aa2670","Type":"ContainerStarted","Data":"0ccbe5d1bab3ab9004e7695c4ce67a3a4372062e630d2bc5f444e0c3b2ba84c6"} Jan 21 11:20:33 crc kubenswrapper[4925]: I0121 11:20:33.466260 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" event={"ID":"cac93b53-352f-4f28-a456-b80df0aa2670","Type":"ContainerStarted","Data":"72e0783ccda2a2894004575b9ac6b1711e66a0188ddb127e8e8657abf5b62e77"} Jan 21 11:20:33 crc kubenswrapper[4925]: I0121 11:20:33.466294 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:20:33 crc kubenswrapper[4925]: I0121 11:20:33.498742 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" podStartSLOduration=2.498720768 podStartE2EDuration="2.498720768s" podCreationTimestamp="2026-01-21 11:20:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:20:33.488561938 +0000 UTC m=+1525.092453882" watchObservedRunningTime="2026-01-21 11:20:33.498720768 +0000 UTC m=+1525.102612702" Jan 21 11:20:42 crc kubenswrapper[4925]: I0121 11:20:42.551771 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerStarted","Data":"23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2"} Jan 21 11:20:42 crc kubenswrapper[4925]: I0121 11:20:42.552574 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:42 crc kubenswrapper[4925]: I0121 11:20:42.552043 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="proxy-httpd" containerID="cri-o://23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2" gracePeriod=30 Jan 21 11:20:42 crc kubenswrapper[4925]: I0121 11:20:42.552069 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="ceilometer-notification-agent" containerID="cri-o://a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38" gracePeriod=30 Jan 21 11:20:42 crc kubenswrapper[4925]: I0121 11:20:42.551932 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="ceilometer-central-agent" containerID="cri-o://e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e" 
gracePeriod=30 Jan 21 11:20:42 crc kubenswrapper[4925]: I0121 11:20:42.551988 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="sg-core" containerID="cri-o://db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327" gracePeriod=30 Jan 21 11:20:42 crc kubenswrapper[4925]: I0121 11:20:42.581152 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.376641354 podStartE2EDuration="32.58112045s" podCreationTimestamp="2026-01-21 11:20:10 +0000 UTC" firstStartedPulling="2026-01-21 11:20:11.60284108 +0000 UTC m=+1503.206733014" lastFinishedPulling="2026-01-21 11:20:41.807320176 +0000 UTC m=+1533.411212110" observedRunningTime="2026-01-21 11:20:42.577003981 +0000 UTC m=+1534.180895915" watchObservedRunningTime="2026-01-21 11:20:42.58112045 +0000 UTC m=+1534.185012384" Jan 21 11:20:43 crc kubenswrapper[4925]: I0121 11:20:43.564264 4925 generic.go:334] "Generic (PLEG): container finished" podID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerID="23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2" exitCode=0 Jan 21 11:20:43 crc kubenswrapper[4925]: I0121 11:20:43.564610 4925 generic.go:334] "Generic (PLEG): container finished" podID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerID="db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327" exitCode=2 Jan 21 11:20:43 crc kubenswrapper[4925]: I0121 11:20:43.564623 4925 generic.go:334] "Generic (PLEG): container finished" podID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerID="e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e" exitCode=0 Jan 21 11:20:43 crc kubenswrapper[4925]: I0121 11:20:43.564360 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerDied","Data":"23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2"} Jan 21 11:20:43 crc kubenswrapper[4925]: I0121 11:20:43.564671 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerDied","Data":"db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327"} Jan 21 11:20:43 crc kubenswrapper[4925]: I0121 11:20:43.564692 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerDied","Data":"e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e"} Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.209410 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.216189 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-sg-core-conf-yaml\") pod \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.216269 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-combined-ca-bundle\") pod \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.216378 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s26x6\" (UniqueName: \"kubernetes.io/projected/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-kube-api-access-s26x6\") pod \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.216466 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-config-data\") pod \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.216556 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-scripts\") pod \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.216690 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-log-httpd\") pod \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.216723 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-run-httpd\") pod \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\" (UID: \"fd6b3950-f1a0-47e8-9c96-3e632e8ca687\") " Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.217935 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fd6b3950-f1a0-47e8-9c96-3e632e8ca687" (UID: "fd6b3950-f1a0-47e8-9c96-3e632e8ca687"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.218646 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fd6b3950-f1a0-47e8-9c96-3e632e8ca687" (UID: "fd6b3950-f1a0-47e8-9c96-3e632e8ca687"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.225109 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-kube-api-access-s26x6" (OuterVolumeSpecName: "kube-api-access-s26x6") pod "fd6b3950-f1a0-47e8-9c96-3e632e8ca687" (UID: "fd6b3950-f1a0-47e8-9c96-3e632e8ca687"). InnerVolumeSpecName "kube-api-access-s26x6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.225185 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-scripts" (OuterVolumeSpecName: "scripts") pod "fd6b3950-f1a0-47e8-9c96-3e632e8ca687" (UID: "fd6b3950-f1a0-47e8-9c96-3e632e8ca687"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.300532 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fd6b3950-f1a0-47e8-9c96-3e632e8ca687" (UID: "fd6b3950-f1a0-47e8-9c96-3e632e8ca687"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.318872 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.318948 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s26x6\" (UniqueName: \"kubernetes.io/projected/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-kube-api-access-s26x6\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.318972 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.318986 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.319027 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.340134 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd6b3950-f1a0-47e8-9c96-3e632e8ca687" (UID: "fd6b3950-f1a0-47e8-9c96-3e632e8ca687"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.360956 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-config-data" (OuterVolumeSpecName: "config-data") pod "fd6b3950-f1a0-47e8-9c96-3e632e8ca687" (UID: "fd6b3950-f1a0-47e8-9c96-3e632e8ca687"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.421008 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.421276 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd6b3950-f1a0-47e8-9c96-3e632e8ca687-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.622871 4925 generic.go:334] "Generic (PLEG): container finished" podID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerID="a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38" exitCode=0 Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.622928 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerDied","Data":"a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38"} Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.622963 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"fd6b3950-f1a0-47e8-9c96-3e632e8ca687","Type":"ContainerDied","Data":"45b2d1c90be79c4b6db4ad2bd204b42cb75dd300d26c4f2e2f583c01c6345a49"} Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.622987 4925 scope.go:117] "RemoveContainer" containerID="23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.623451 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.764225 4925 scope.go:117] "RemoveContainer" containerID="db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.840556 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.846702 4925 scope.go:117] "RemoveContainer" containerID="a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.861224 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.912169 4925 scope.go:117] "RemoveContainer" containerID="e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.913981 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:48 crc kubenswrapper[4925]: E0121 11:20:48.914444 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="proxy-httpd" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.914466 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="proxy-httpd" Jan 21 11:20:48 crc kubenswrapper[4925]: E0121 11:20:48.914480 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="ceilometer-central-agent" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.914488 4925 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="ceilometer-central-agent" Jan 21 11:20:48 crc kubenswrapper[4925]: E0121 11:20:48.914520 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="ceilometer-notification-agent" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.914527 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="ceilometer-notification-agent" Jan 21 11:20:48 crc kubenswrapper[4925]: E0121 11:20:48.914542 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="sg-core" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.914548 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="sg-core" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.914693 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="sg-core" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.914743 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="ceilometer-central-agent" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.914754 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="proxy-httpd" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.914766 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" containerName="ceilometer-notification-agent" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.916319 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.924895 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.926169 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.930956 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:48 crc kubenswrapper[4925]: I0121 11:20:48.997800 4925 scope.go:117] "RemoveContainer" containerID="23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2" Jan 21 11:20:49 crc kubenswrapper[4925]: E0121 11:20:49.000344 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2\": container with ID starting with 23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2 not found: ID does not exist" containerID="23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.000383 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2"} err="failed to get container status \"23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2\": rpc error: code = NotFound desc = could not find container \"23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2\": container with ID starting with 23ddbcfc25538543df0127c9807eb3ffcd3d4cdb07f6acac474688a2f00ec3e2 not found: ID does not exist" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.000426 4925 scope.go:117] "RemoveContainer" containerID="db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327" Jan 21 11:20:49 crc kubenswrapper[4925]: E0121 11:20:49.004342 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327\": container with ID starting with db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327 not found: ID does not exist" containerID="db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.005225 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327"} err="failed to get container status \"db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327\": rpc error: code = NotFound desc = could not find container \"db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327\": container with ID starting with db72c9253eecc1bd1d85d35ca692c6b093a01c6f242f2ac66346d1436928e327 not found: ID does not exist" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.005263 4925 scope.go:117] "RemoveContainer" containerID="a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38" Jan 21 11:20:49 crc kubenswrapper[4925]: E0121 11:20:49.008431 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38\": container with ID starting with 
a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38 not found: ID does not exist" containerID="a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.009192 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38"} err="failed to get container status \"a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38\": rpc error: code = NotFound desc = could not find container \"a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38\": container with ID starting with a07ea54bdbc247022eaf63aaa5eb69842407aa064d737eefc3371c58657fce38 not found: ID does not exist" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.009248 4925 scope.go:117] "RemoveContainer" containerID="e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e" Jan 21 11:20:49 crc kubenswrapper[4925]: E0121 11:20:49.010623 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e\": container with ID starting with e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e not found: ID does not exist" containerID="e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.010664 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e"} err="failed to get container status \"e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e\": rpc error: code = NotFound desc = could not find container \"e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e\": container with ID starting with e7126595755243b6b564b1aad8919a3b613f6c71fd14676b489ccb2aa569b25e not found: ID does not exist" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.039796 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-log-httpd\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.039920 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clpd8\" (UniqueName: \"kubernetes.io/projected/934162df-b33a-4c92-9d6c-668ff334264a-kube-api-access-clpd8\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.039985 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-run-httpd\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.040030 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-config-data\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 
11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.040125 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-scripts\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.040171 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.040228 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.142227 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-log-httpd\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.142335 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clpd8\" (UniqueName: \"kubernetes.io/projected/934162df-b33a-4c92-9d6c-668ff334264a-kube-api-access-clpd8\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.142376 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-run-httpd\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.142428 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-config-data\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.142493 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-scripts\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.142521 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.142545 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.143267 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-run-httpd\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.143747 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-log-httpd\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.148605 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.148897 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-config-data\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.150433 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.159323 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-scripts\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.162384 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clpd8\" (UniqueName: \"kubernetes.io/projected/934162df-b33a-4c92-9d6c-668ff334264a-kube-api-access-clpd8\") pod \"ceilometer-0\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.266071 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.519726 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd6b3950-f1a0-47e8-9c96-3e632e8ca687" path="/var/lib/kubelet/pods/fd6b3950-f1a0-47e8-9c96-3e632e8ca687/volumes" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.765769 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.940666 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.940954 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.941003 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.941772 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6fb1cacdd241e7a8efac0b528deff5f04d57c5b631c8479e71c5d41a4ae7e250"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:20:49 crc kubenswrapper[4925]: I0121 11:20:49.941888 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://6fb1cacdd241e7a8efac0b528deff5f04d57c5b631c8479e71c5d41a4ae7e250" gracePeriod=600 Jan 21 11:20:50 crc kubenswrapper[4925]: I0121 11:20:50.644447 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerStarted","Data":"1dfbfce8bb83f73f60791ee4da73c12c5b818bf4d6c49001809167d54d257c3f"} Jan 21 11:20:50 crc kubenswrapper[4925]: I0121 11:20:50.647385 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="6fb1cacdd241e7a8efac0b528deff5f04d57c5b631c8479e71c5d41a4ae7e250" exitCode=0 Jan 21 11:20:50 crc kubenswrapper[4925]: I0121 11:20:50.647798 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"6fb1cacdd241e7a8efac0b528deff5f04d57c5b631c8479e71c5d41a4ae7e250"} Jan 21 11:20:50 crc kubenswrapper[4925]: I0121 11:20:50.647965 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73"} Jan 21 11:20:50 crc kubenswrapper[4925]: I0121 
11:20:50.648062 4925 scope.go:117] "RemoveContainer" containerID="67d412d76a3774c8b426878268b1816585378c0b05acfee3e5041ad5e7dbd93a" Jan 21 11:20:51 crc kubenswrapper[4925]: I0121 11:20:51.657895 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerStarted","Data":"f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8"} Jan 21 11:20:52 crc kubenswrapper[4925]: I0121 11:20:52.673508 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerStarted","Data":"e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd"} Jan 21 11:20:52 crc kubenswrapper[4925]: I0121 11:20:52.674159 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerStarted","Data":"d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2"} Jan 21 11:20:54 crc kubenswrapper[4925]: I0121 11:20:54.692793 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerStarted","Data":"b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6"} Jan 21 11:20:54 crc kubenswrapper[4925]: I0121 11:20:54.693440 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:20:54 crc kubenswrapper[4925]: I0121 11:20:54.728014 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=3.015913378 podStartE2EDuration="6.727983094s" podCreationTimestamp="2026-01-21 11:20:48 +0000 UTC" firstStartedPulling="2026-01-21 11:20:49.78006705 +0000 UTC m=+1541.383958984" lastFinishedPulling="2026-01-21 11:20:53.492136766 +0000 UTC m=+1545.096028700" observedRunningTime="2026-01-21 11:20:54.715378206 +0000 UTC m=+1546.319270150" watchObservedRunningTime="2026-01-21 11:20:54.727983094 +0000 UTC m=+1546.331875028" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.086803 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.540182 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/openstackclient"] Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.541960 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.544916 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"openstack-config-secret" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.546969 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"openstack-config" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.549883 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"openstackclient-openstackclient-dockercfg-ncnsp" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.555045 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstackclient"] Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.635106 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bssqj\" (UniqueName: \"kubernetes.io/projected/2c73a99a-7a7e-4746-9404-3dc64865ea05-kube-api-access-bssqj\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.635163 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2c73a99a-7a7e-4746-9404-3dc64865ea05-openstack-config\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.635187 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c73a99a-7a7e-4746-9404-3dc64865ea05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.635217 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2c73a99a-7a7e-4746-9404-3dc64865ea05-openstack-config-secret\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.937484 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bssqj\" (UniqueName: \"kubernetes.io/projected/2c73a99a-7a7e-4746-9404-3dc64865ea05-kube-api-access-bssqj\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.937532 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2c73a99a-7a7e-4746-9404-3dc64865ea05-openstack-config\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.937564 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c73a99a-7a7e-4746-9404-3dc64865ea05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" 
Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.937593 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2c73a99a-7a7e-4746-9404-3dc64865ea05-openstack-config-secret\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.938814 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2c73a99a-7a7e-4746-9404-3dc64865ea05-openstack-config\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.950517 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c73a99a-7a7e-4746-9404-3dc64865ea05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.951008 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2c73a99a-7a7e-4746-9404-3dc64865ea05-openstack-config-secret\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:04 crc kubenswrapper[4925]: I0121 11:21:04.959931 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bssqj\" (UniqueName: \"kubernetes.io/projected/2c73a99a-7a7e-4746-9404-3dc64865ea05-kube-api-access-bssqj\") pod \"openstackclient\" (UID: \"2c73a99a-7a7e-4746-9404-3dc64865ea05\") " pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:05 crc kubenswrapper[4925]: I0121 11:21:05.175088 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/openstackclient" Jan 21 11:21:05 crc kubenswrapper[4925]: I0121 11:21:05.893154 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/openstackclient"] Jan 21 11:21:05 crc kubenswrapper[4925]: W0121 11:21:05.896803 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c73a99a_7a7e_4746_9404_3dc64865ea05.slice/crio-6ea417f3ebb7eeff3f05f8973f00f233667f977142d820170c96a3f84ec69239 WatchSource:0}: Error finding container 6ea417f3ebb7eeff3f05f8973f00f233667f977142d820170c96a3f84ec69239: Status 404 returned error can't find the container with id 6ea417f3ebb7eeff3f05f8973f00f233667f977142d820170c96a3f84ec69239 Jan 21 11:21:05 crc kubenswrapper[4925]: I0121 11:21:05.946533 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstackclient" event={"ID":"2c73a99a-7a7e-4746-9404-3dc64865ea05","Type":"ContainerStarted","Data":"6ea417f3ebb7eeff3f05f8973f00f233667f977142d820170c96a3f84ec69239"} Jan 21 11:21:12 crc kubenswrapper[4925]: I0121 11:21:12.945938 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lhq4n"] Jan 21 11:21:12 crc kubenswrapper[4925]: I0121 11:21:12.950270 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:12 crc kubenswrapper[4925]: I0121 11:21:12.960668 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lhq4n"] Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.080280 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-catalog-content\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.080728 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-utilities\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.080761 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdb7x\" (UniqueName: \"kubernetes.io/projected/1bb05265-9c05-42cf-84bb-772dc4393057-kube-api-access-jdb7x\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.182256 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-catalog-content\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.182338 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdb7x\" (UniqueName: \"kubernetes.io/projected/1bb05265-9c05-42cf-84bb-772dc4393057-kube-api-access-jdb7x\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.182372 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-utilities\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.182968 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-catalog-content\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.182987 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-utilities\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.210723 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jdb7x\" (UniqueName: \"kubernetes.io/projected/1bb05265-9c05-42cf-84bb-772dc4393057-kube-api-access-jdb7x\") pod \"redhat-operators-lhq4n\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") " pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:13 crc kubenswrapper[4925]: I0121 11:21:13.280199 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lhq4n" Jan 21 11:21:18 crc kubenswrapper[4925]: I0121 11:21:18.274349 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lhq4n"] Jan 21 11:21:18 crc kubenswrapper[4925]: W0121 11:21:18.291062 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bb05265_9c05_42cf_84bb_772dc4393057.slice/crio-d729da01591fcbc75a8455e24fd2a52303bce2a77949b6642163696e7ebe9727 WatchSource:0}: Error finding container d729da01591fcbc75a8455e24fd2a52303bce2a77949b6642163696e7ebe9727: Status 404 returned error can't find the container with id d729da01591fcbc75a8455e24fd2a52303bce2a77949b6642163696e7ebe9727 Jan 21 11:21:18 crc kubenswrapper[4925]: E0121 11:21:18.625440 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bb05265_9c05_42cf_84bb_772dc4393057.slice/crio-e09d24ecd59c23c019bfbe66b298fa3c89d87fa284a9d00016af7f3b48361e8b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bb05265_9c05_42cf_84bb_772dc4393057.slice/crio-conmon-e09d24ecd59c23c019bfbe66b298fa3c89d87fa284a9d00016af7f3b48361e8b.scope\": RecentStats: unable to find data in memory cache]" Jan 21 11:21:19 crc kubenswrapper[4925]: I0121 11:21:19.101949 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lhq4n" event={"ID":"1bb05265-9c05-42cf-84bb-772dc4393057","Type":"ContainerDied","Data":"e09d24ecd59c23c019bfbe66b298fa3c89d87fa284a9d00016af7f3b48361e8b"} Jan 21 11:21:19 crc kubenswrapper[4925]: I0121 11:21:19.101878 4925 generic.go:334] "Generic (PLEG): container finished" podID="1bb05265-9c05-42cf-84bb-772dc4393057" containerID="e09d24ecd59c23c019bfbe66b298fa3c89d87fa284a9d00016af7f3b48361e8b" exitCode=0 Jan 21 11:21:19 crc kubenswrapper[4925]: I0121 11:21:19.102546 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lhq4n" event={"ID":"1bb05265-9c05-42cf-84bb-772dc4393057","Type":"ContainerStarted","Data":"d729da01591fcbc75a8455e24fd2a52303bce2a77949b6642163696e7ebe9727"} Jan 21 11:21:19 crc kubenswrapper[4925]: I0121 11:21:19.105135 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/openstackclient" event={"ID":"2c73a99a-7a7e-4746-9404-3dc64865ea05","Type":"ContainerStarted","Data":"3dff9668cfd8f74553a2b94aae852495f7e0ab48d19887c9f23d7833a027a095"} Jan 21 11:21:19 crc kubenswrapper[4925]: I0121 11:21:19.272212 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:19 crc kubenswrapper[4925]: I0121 11:21:19.304639 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/openstackclient" podStartSLOduration=3.319307265 podStartE2EDuration="15.304618463s" podCreationTimestamp="2026-01-21 11:21:04 +0000 UTC" firstStartedPulling="2026-01-21 
11:21:05.901745216 +0000 UTC m=+1557.505637150" lastFinishedPulling="2026-01-21 11:21:17.887056414 +0000 UTC m=+1569.490948348" observedRunningTime="2026-01-21 11:21:19.147413915 +0000 UTC m=+1570.751305879" watchObservedRunningTime="2026-01-21 11:21:19.304618463 +0000 UTC m=+1570.908510387" Jan 21 11:21:20 crc kubenswrapper[4925]: I0121 11:21:20.114919 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lhq4n" event={"ID":"1bb05265-9c05-42cf-84bb-772dc4393057","Type":"ContainerStarted","Data":"2586edbc6114d143017f7c5a549a1edc7a7107ffb92de81c74424119b8fea061"} Jan 21 11:21:22 crc kubenswrapper[4925]: I0121 11:21:22.139868 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:21:22 crc kubenswrapper[4925]: I0121 11:21:22.140549 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/kube-state-metrics-0" podUID="39b2180d-2f0e-472f-937f-3b25cf112bae" containerName="kube-state-metrics" containerID="cri-o://41f8a4515161523db7491d9b04231422b1096aad3dc3c3a4038c5a7efa0fdd89" gracePeriod=30 Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.161451 4925 generic.go:334] "Generic (PLEG): container finished" podID="1bb05265-9c05-42cf-84bb-772dc4393057" containerID="2586edbc6114d143017f7c5a549a1edc7a7107ffb92de81c74424119b8fea061" exitCode=0 Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.161728 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lhq4n" event={"ID":"1bb05265-9c05-42cf-84bb-772dc4393057","Type":"ContainerDied","Data":"2586edbc6114d143017f7c5a549a1edc7a7107ffb92de81c74424119b8fea061"} Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.165423 4925 generic.go:334] "Generic (PLEG): container finished" podID="39b2180d-2f0e-472f-937f-3b25cf112bae" containerID="41f8a4515161523db7491d9b04231422b1096aad3dc3c3a4038c5a7efa0fdd89" exitCode=2 Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.165489 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"39b2180d-2f0e-472f-937f-3b25cf112bae","Type":"ContainerDied","Data":"41f8a4515161523db7491d9b04231422b1096aad3dc3c3a4038c5a7efa0fdd89"} Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.165526 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"39b2180d-2f0e-472f-937f-3b25cf112bae","Type":"ContainerDied","Data":"a81a8ddc36d261c2b07ae470ecc2c97c58e43ed3916648cba2f02ee6f548e67e"} Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.165540 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a81a8ddc36d261c2b07ae470ecc2c97c58e43ed3916648cba2f02ee6f548e67e" Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.199770 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.278351 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.278671 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="ceilometer-central-agent" containerID="cri-o://f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8" gracePeriod=30 Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.278757 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="proxy-httpd" containerID="cri-o://b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6" gracePeriod=30 Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.278757 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="sg-core" containerID="cri-o://e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd" gracePeriod=30 Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.278864 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="ceilometer-notification-agent" containerID="cri-o://d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2" gracePeriod=30 Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.322549 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjwr6\" (UniqueName: \"kubernetes.io/projected/39b2180d-2f0e-472f-937f-3b25cf112bae-kube-api-access-hjwr6\") pod \"39b2180d-2f0e-472f-937f-3b25cf112bae\" (UID: \"39b2180d-2f0e-472f-937f-3b25cf112bae\") " Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.332358 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39b2180d-2f0e-472f-937f-3b25cf112bae-kube-api-access-hjwr6" (OuterVolumeSpecName: "kube-api-access-hjwr6") pod "39b2180d-2f0e-472f-937f-3b25cf112bae" (UID: "39b2180d-2f0e-472f-937f-3b25cf112bae"). InnerVolumeSpecName "kube-api-access-hjwr6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:21:23 crc kubenswrapper[4925]: I0121 11:21:23.424797 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjwr6\" (UniqueName: \"kubernetes.io/projected/39b2180d-2f0e-472f-937f-3b25cf112bae-kube-api-access-hjwr6\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.176164 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lhq4n" event={"ID":"1bb05265-9c05-42cf-84bb-772dc4393057","Type":"ContainerStarted","Data":"5aaa4ff8321e9b754a751cd0dee72534902cc36e45dd13c02859591cba089159"} Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.181331 4925 generic.go:334] "Generic (PLEG): container finished" podID="934162df-b33a-4c92-9d6c-668ff334264a" containerID="b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6" exitCode=0 Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.181376 4925 generic.go:334] "Generic (PLEG): container finished" podID="934162df-b33a-4c92-9d6c-668ff334264a" containerID="e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd" exitCode=2 Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.181388 4925 generic.go:334] "Generic (PLEG): container finished" podID="934162df-b33a-4c92-9d6c-668ff334264a" containerID="f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8" exitCode=0 Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.181504 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.182324 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerDied","Data":"b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6"} Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.182362 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerDied","Data":"e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd"} Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.182379 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerDied","Data":"f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8"} Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.203568 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lhq4n" podStartSLOduration=7.591066127 podStartE2EDuration="12.203546502s" podCreationTimestamp="2026-01-21 11:21:12 +0000 UTC" firstStartedPulling="2026-01-21 11:21:19.104346686 +0000 UTC m=+1570.708238640" lastFinishedPulling="2026-01-21 11:21:23.716827081 +0000 UTC m=+1575.320719015" observedRunningTime="2026-01-21 11:21:24.196830229 +0000 UTC m=+1575.800722163" watchObservedRunningTime="2026-01-21 11:21:24.203546502 +0000 UTC m=+1575.807438436" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.221123 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.229908 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:21:24 crc kubenswrapper[4925]: 
I0121 11:21:24.258097 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:21:24 crc kubenswrapper[4925]: E0121 11:21:24.258513 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39b2180d-2f0e-472f-937f-3b25cf112bae" containerName="kube-state-metrics" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.258543 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="39b2180d-2f0e-472f-937f-3b25cf112bae" containerName="kube-state-metrics" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.258771 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="39b2180d-2f0e-472f-937f-3b25cf112bae" containerName="kube-state-metrics" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.259578 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.264513 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"kube-state-metrics-tls-config" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.269653 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-kube-state-metrics-svc" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.279372 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.444740 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkr5t\" (UniqueName: \"kubernetes.io/projected/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-api-access-fkr5t\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.444841 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.445375 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.445485 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.546676 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 
crc kubenswrapper[4925]: I0121 11:21:24.546761 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkr5t\" (UniqueName: \"kubernetes.io/projected/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-api-access-fkr5t\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.546790 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.546909 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.552344 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.554163 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.561606 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.566820 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkr5t\" (UniqueName: \"kubernetes.io/projected/36d577b6-db6f-4302-a839-ed148c56f7b6-kube-api-access-fkr5t\") pod \"kube-state-metrics-0\" (UID: \"36d577b6-db6f-4302-a839-ed148c56f7b6\") " pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:24 crc kubenswrapper[4925]: I0121 11:21:24.580735 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.040923 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.160026 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/kube-state-metrics-0"] Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.163783 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-log-httpd\") pod \"934162df-b33a-4c92-9d6c-668ff334264a\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.163843 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-sg-core-conf-yaml\") pod \"934162df-b33a-4c92-9d6c-668ff334264a\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.163995 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-scripts\") pod \"934162df-b33a-4c92-9d6c-668ff334264a\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.164028 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-run-httpd\") pod \"934162df-b33a-4c92-9d6c-668ff334264a\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.164084 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clpd8\" (UniqueName: \"kubernetes.io/projected/934162df-b33a-4c92-9d6c-668ff334264a-kube-api-access-clpd8\") pod \"934162df-b33a-4c92-9d6c-668ff334264a\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.164692 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "934162df-b33a-4c92-9d6c-668ff334264a" (UID: "934162df-b33a-4c92-9d6c-668ff334264a"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.164764 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-config-data\") pod \"934162df-b33a-4c92-9d6c-668ff334264a\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.164871 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-combined-ca-bundle\") pod \"934162df-b33a-4c92-9d6c-668ff334264a\" (UID: \"934162df-b33a-4c92-9d6c-668ff334264a\") " Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.165569 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.171849 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-scripts" (OuterVolumeSpecName: "scripts") pod "934162df-b33a-4c92-9d6c-668ff334264a" (UID: "934162df-b33a-4c92-9d6c-668ff334264a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.174616 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "934162df-b33a-4c92-9d6c-668ff334264a" (UID: "934162df-b33a-4c92-9d6c-668ff334264a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.190576 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/934162df-b33a-4c92-9d6c-668ff334264a-kube-api-access-clpd8" (OuterVolumeSpecName: "kube-api-access-clpd8") pod "934162df-b33a-4c92-9d6c-668ff334264a" (UID: "934162df-b33a-4c92-9d6c-668ff334264a"). InnerVolumeSpecName "kube-api-access-clpd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.204487 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "934162df-b33a-4c92-9d6c-668ff334264a" (UID: "934162df-b33a-4c92-9d6c-668ff334264a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.209012 4925 generic.go:334] "Generic (PLEG): container finished" podID="934162df-b33a-4c92-9d6c-668ff334264a" containerID="d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2" exitCode=0 Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.209080 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.209120 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerDied","Data":"d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2"} Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.209156 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"934162df-b33a-4c92-9d6c-668ff334264a","Type":"ContainerDied","Data":"1dfbfce8bb83f73f60791ee4da73c12c5b818bf4d6c49001809167d54d257c3f"} Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.209176 4925 scope.go:117] "RemoveContainer" containerID="b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.212966 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"36d577b6-db6f-4302-a839-ed148c56f7b6","Type":"ContainerStarted","Data":"4ce2890c48df8aa4623a09962b1baf6a6672f5f53e32bfc483557829c14c0fb2"} Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.240338 4925 scope.go:117] "RemoveContainer" containerID="e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.260686 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "934162df-b33a-4c92-9d6c-668ff334264a" (UID: "934162df-b33a-4c92-9d6c-668ff334264a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.267826 4925 scope.go:117] "RemoveContainer" containerID="d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.273128 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.273175 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/934162df-b33a-4c92-9d6c-668ff334264a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.273822 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clpd8\" (UniqueName: \"kubernetes.io/projected/934162df-b33a-4c92-9d6c-668ff334264a-kube-api-access-clpd8\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.273859 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.273893 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.277697 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-config-data" (OuterVolumeSpecName: 
"config-data") pod "934162df-b33a-4c92-9d6c-668ff334264a" (UID: "934162df-b33a-4c92-9d6c-668ff334264a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.321947 4925 scope.go:117] "RemoveContainer" containerID="f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.345180 4925 scope.go:117] "RemoveContainer" containerID="b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6" Jan 21 11:21:25 crc kubenswrapper[4925]: E0121 11:21:25.347614 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6\": container with ID starting with b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6 not found: ID does not exist" containerID="b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.347661 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6"} err="failed to get container status \"b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6\": rpc error: code = NotFound desc = could not find container \"b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6\": container with ID starting with b7374039f08b6c0e25816931702ff4c7b5a4d6edb6ec441f43dd77a46386e5f6 not found: ID does not exist" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.347710 4925 scope.go:117] "RemoveContainer" containerID="e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd" Jan 21 11:21:25 crc kubenswrapper[4925]: E0121 11:21:25.348069 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd\": container with ID starting with e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd not found: ID does not exist" containerID="e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.348095 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd"} err="failed to get container status \"e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd\": rpc error: code = NotFound desc = could not find container \"e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd\": container with ID starting with e62ae3f43dd6696d70e26cae163bae12c26c8ac83631cd7a630455044dc3dffd not found: ID does not exist" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.348123 4925 scope.go:117] "RemoveContainer" containerID="d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2" Jan 21 11:21:25 crc kubenswrapper[4925]: E0121 11:21:25.348507 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2\": container with ID starting with d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2 not found: ID does not exist" containerID="d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.348534 
4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2"} err="failed to get container status \"d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2\": rpc error: code = NotFound desc = could not find container \"d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2\": container with ID starting with d95f092b201079397db14c171d381a2c6a2f29140c48b80d7ecc878e259019d2 not found: ID does not exist" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.348553 4925 scope.go:117] "RemoveContainer" containerID="f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8" Jan 21 11:21:25 crc kubenswrapper[4925]: E0121 11:21:25.348918 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8\": container with ID starting with f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8 not found: ID does not exist" containerID="f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.348959 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8"} err="failed to get container status \"f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8\": rpc error: code = NotFound desc = could not find container \"f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8\": container with ID starting with f26d2a020464af39fbf26fbb7b7162a13218367318ab3ca9e1fb39a8f5aee3d8 not found: ID does not exist" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.376868 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/934162df-b33a-4c92-9d6c-668ff334264a-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.519010 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39b2180d-2f0e-472f-937f-3b25cf112bae" path="/var/lib/kubelet/pods/39b2180d-2f0e-472f-937f-3b25cf112bae/volumes" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.560111 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.574002 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.592491 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:21:25 crc kubenswrapper[4925]: E0121 11:21:25.593202 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="proxy-httpd" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.593220 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="proxy-httpd" Jan 21 11:21:25 crc kubenswrapper[4925]: E0121 11:21:25.593236 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="sg-core" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.593243 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="sg-core" Jan 21 11:21:25 crc 
kubenswrapper[4925]: E0121 11:21:25.593297 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="ceilometer-central-agent" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.593304 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="ceilometer-central-agent" Jan 21 11:21:25 crc kubenswrapper[4925]: E0121 11:21:25.593324 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="ceilometer-notification-agent" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.593330 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="ceilometer-notification-agent" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.593491 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="proxy-httpd" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.593503 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="ceilometer-central-agent" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.593518 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="ceilometer-notification-agent" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.593530 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="934162df-b33a-4c92-9d6c-668ff334264a" containerName="sg-core" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.600659 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.605620 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.605910 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.606093 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.626112 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.784988 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-scripts\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.785095 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-run-httpd\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.785135 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-config-data\") pod 
\"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.785173 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.785293 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.785335 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.785360 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftd4g\" (UniqueName: \"kubernetes.io/projected/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-kube-api-access-ftd4g\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.785379 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-log-httpd\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.887380 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-scripts\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.887521 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-run-httpd\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.887576 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-config-data\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.887611 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: 
I0121 11:21:25.887662 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.887686 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.887719 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftd4g\" (UniqueName: \"kubernetes.io/projected/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-kube-api-access-ftd4g\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.887741 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-log-httpd\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.888312 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-run-httpd\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.888360 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-log-httpd\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.893358 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-scripts\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.894107 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.895298 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.896258 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") 
" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.896577 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-config-data\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:25 crc kubenswrapper[4925]: I0121 11:21:25.917786 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftd4g\" (UniqueName: \"kubernetes.io/projected/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-kube-api-access-ftd4g\") pod \"ceilometer-0\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:26 crc kubenswrapper[4925]: I0121 11:21:26.032211 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.266835 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/kube-state-metrics-0" event={"ID":"36d577b6-db6f-4302-a839-ed148c56f7b6","Type":"ContainerStarted","Data":"c7a40b7d8127465235b36564edca1820882310524051d990a7550cda5e0195b8"} Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.268675 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/kube-state-metrics-0" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.332000 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/kube-state-metrics-0" podStartSLOduration=1.983993836 podStartE2EDuration="2.33197034s" podCreationTimestamp="2026-01-21 11:21:24 +0000 UTC" firstStartedPulling="2026-01-21 11:21:25.191750369 +0000 UTC m=+1576.795642303" lastFinishedPulling="2026-01-21 11:21:25.539726873 +0000 UTC m=+1577.143618807" observedRunningTime="2026-01-21 11:21:26.316818992 +0000 UTC m=+1577.920710936" watchObservedRunningTime="2026-01-21 11:21:26.33197034 +0000 UTC m=+1577.935862274" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.600508 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-mpckb"] Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.602077 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.630535 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-mpckb"] Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.645318 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.691869 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m"] Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.692911 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.702986 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.721461 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m"] Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.723412 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfwwg\" (UniqueName: \"kubernetes.io/projected/1e7f680c-2b41-48df-b73e-815b8afec52a-kube-api-access-kfwwg\") pod \"watcher-db-create-mpckb\" (UID: \"1e7f680c-2b41-48df-b73e-815b8afec52a\") " pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.724037 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e7f680c-2b41-48df-b73e-815b8afec52a-operator-scripts\") pod \"watcher-db-create-mpckb\" (UID: \"1e7f680c-2b41-48df-b73e-815b8afec52a\") " pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.825654 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t8mt\" (UniqueName: \"kubernetes.io/projected/37a2f6c8-08e8-4000-84f6-5d639c2def77-kube-api-access-4t8mt\") pod \"watcher-7bc3-account-create-update-6qv8m\" (UID: \"37a2f6c8-08e8-4000-84f6-5d639c2def77\") " pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.825754 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e7f680c-2b41-48df-b73e-815b8afec52a-operator-scripts\") pod \"watcher-db-create-mpckb\" (UID: \"1e7f680c-2b41-48df-b73e-815b8afec52a\") " pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.825781 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37a2f6c8-08e8-4000-84f6-5d639c2def77-operator-scripts\") pod \"watcher-7bc3-account-create-update-6qv8m\" (UID: \"37a2f6c8-08e8-4000-84f6-5d639c2def77\") " pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.825856 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfwwg\" (UniqueName: \"kubernetes.io/projected/1e7f680c-2b41-48df-b73e-815b8afec52a-kube-api-access-kfwwg\") pod \"watcher-db-create-mpckb\" (UID: \"1e7f680c-2b41-48df-b73e-815b8afec52a\") " pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.826848 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e7f680c-2b41-48df-b73e-815b8afec52a-operator-scripts\") pod \"watcher-db-create-mpckb\" (UID: \"1e7f680c-2b41-48df-b73e-815b8afec52a\") " pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.848349 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfwwg\" 
(UniqueName: \"kubernetes.io/projected/1e7f680c-2b41-48df-b73e-815b8afec52a-kube-api-access-kfwwg\") pod \"watcher-db-create-mpckb\" (UID: \"1e7f680c-2b41-48df-b73e-815b8afec52a\") " pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.927768 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t8mt\" (UniqueName: \"kubernetes.io/projected/37a2f6c8-08e8-4000-84f6-5d639c2def77-kube-api-access-4t8mt\") pod \"watcher-7bc3-account-create-update-6qv8m\" (UID: \"37a2f6c8-08e8-4000-84f6-5d639c2def77\") " pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.927885 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37a2f6c8-08e8-4000-84f6-5d639c2def77-operator-scripts\") pod \"watcher-7bc3-account-create-update-6qv8m\" (UID: \"37a2f6c8-08e8-4000-84f6-5d639c2def77\") " pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.928874 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37a2f6c8-08e8-4000-84f6-5d639c2def77-operator-scripts\") pod \"watcher-7bc3-account-create-update-6qv8m\" (UID: \"37a2f6c8-08e8-4000-84f6-5d639c2def77\") " pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:26.946929 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4t8mt\" (UniqueName: \"kubernetes.io/projected/37a2f6c8-08e8-4000-84f6-5d639c2def77-kube-api-access-4t8mt\") pod \"watcher-7bc3-account-create-update-6qv8m\" (UID: \"37a2f6c8-08e8-4000-84f6-5d639c2def77\") " pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:27.043292 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:27.047473 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:27.300668 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerStarted","Data":"a5acd6933d088043cbd0d34ddbc8d308014836b53de481d6df80e065abe15ddf"} Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:27.512299 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="934162df-b33a-4c92-9d6c-668ff334264a" path="/var/lib/kubelet/pods/934162df-b33a-4c92-9d6c-668ff334264a/volumes" Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:27.916867 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-mpckb"] Jan 21 11:21:27 crc kubenswrapper[4925]: W0121 11:21:27.930331 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e7f680c_2b41_48df_b73e_815b8afec52a.slice/crio-2e1edb4c88e17ebf2e9ce46cc5808ec175074120073e12ff28dafa4ad1d77796 WatchSource:0}: Error finding container 2e1edb4c88e17ebf2e9ce46cc5808ec175074120073e12ff28dafa4ad1d77796: Status 404 returned error can't find the container with id 2e1edb4c88e17ebf2e9ce46cc5808ec175074120073e12ff28dafa4ad1d77796 Jan 21 11:21:27 crc kubenswrapper[4925]: I0121 11:21:27.943944 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m"] Jan 21 11:21:27 crc kubenswrapper[4925]: W0121 11:21:27.954623 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37a2f6c8_08e8_4000_84f6_5d639c2def77.slice/crio-e339ac91baad2cc13244cfc908d72f33df8899fbd1a97d0a3e64132cf6706d68 WatchSource:0}: Error finding container e339ac91baad2cc13244cfc908d72f33df8899fbd1a97d0a3e64132cf6706d68: Status 404 returned error can't find the container with id e339ac91baad2cc13244cfc908d72f33df8899fbd1a97d0a3e64132cf6706d68 Jan 21 11:21:28 crc kubenswrapper[4925]: I0121 11:21:28.309432 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-mpckb" event={"ID":"1e7f680c-2b41-48df-b73e-815b8afec52a","Type":"ContainerStarted","Data":"2e1edb4c88e17ebf2e9ce46cc5808ec175074120073e12ff28dafa4ad1d77796"} Jan 21 11:21:28 crc kubenswrapper[4925]: I0121 11:21:28.312017 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerStarted","Data":"3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07"} Jan 21 11:21:28 crc kubenswrapper[4925]: I0121 11:21:28.314320 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" event={"ID":"37a2f6c8-08e8-4000-84f6-5d639c2def77","Type":"ContainerStarted","Data":"e339ac91baad2cc13244cfc908d72f33df8899fbd1a97d0a3e64132cf6706d68"} Jan 21 11:21:29 crc kubenswrapper[4925]: I0121 11:21:29.321930 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" event={"ID":"37a2f6c8-08e8-4000-84f6-5d639c2def77","Type":"ContainerStarted","Data":"9cbd272345e52a2e9c3ecffa77cd1497804997c6b1e19e1a1d049859cd0a2308"} Jan 21 11:21:29 crc kubenswrapper[4925]: I0121 11:21:29.324407 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/watcher-db-create-mpckb" event={"ID":"1e7f680c-2b41-48df-b73e-815b8afec52a","Type":"ContainerStarted","Data":"a26b6af523be5c6783095117bd8aaf9237989807a026f8ab07b6d3d8d781857e"} Jan 21 11:21:29 crc kubenswrapper[4925]: I0121 11:21:29.342764 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" podStartSLOduration=3.342741358 podStartE2EDuration="3.342741358s" podCreationTimestamp="2026-01-21 11:21:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:21:29.340799617 +0000 UTC m=+1580.944691551" watchObservedRunningTime="2026-01-21 11:21:29.342741358 +0000 UTC m=+1580.946633292" Jan 21 11:21:29 crc kubenswrapper[4925]: I0121 11:21:29.380089 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-db-create-mpckb" podStartSLOduration=3.380063935 podStartE2EDuration="3.380063935s" podCreationTimestamp="2026-01-21 11:21:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:21:29.373629912 +0000 UTC m=+1580.977521846" watchObservedRunningTime="2026-01-21 11:21:29.380063935 +0000 UTC m=+1580.983955869" Jan 21 11:21:30 crc kubenswrapper[4925]: I0121 11:21:30.335194 4925 generic.go:334] "Generic (PLEG): container finished" podID="1e7f680c-2b41-48df-b73e-815b8afec52a" containerID="a26b6af523be5c6783095117bd8aaf9237989807a026f8ab07b6d3d8d781857e" exitCode=0 Jan 21 11:21:30 crc kubenswrapper[4925]: I0121 11:21:30.335293 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-mpckb" event={"ID":"1e7f680c-2b41-48df-b73e-815b8afec52a","Type":"ContainerDied","Data":"a26b6af523be5c6783095117bd8aaf9237989807a026f8ab07b6d3d8d781857e"} Jan 21 11:21:30 crc kubenswrapper[4925]: I0121 11:21:30.338287 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerStarted","Data":"78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46"} Jan 21 11:21:30 crc kubenswrapper[4925]: I0121 11:21:30.338346 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerStarted","Data":"fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1"} Jan 21 11:21:30 crc kubenswrapper[4925]: I0121 11:21:30.340290 4925 generic.go:334] "Generic (PLEG): container finished" podID="37a2f6c8-08e8-4000-84f6-5d639c2def77" containerID="9cbd272345e52a2e9c3ecffa77cd1497804997c6b1e19e1a1d049859cd0a2308" exitCode=0 Jan 21 11:21:30 crc kubenswrapper[4925]: I0121 11:21:30.340354 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" event={"ID":"37a2f6c8-08e8-4000-84f6-5d639c2def77","Type":"ContainerDied","Data":"9cbd272345e52a2e9c3ecffa77cd1497804997c6b1e19e1a1d049859cd0a2308"} Jan 21 11:21:31 crc kubenswrapper[4925]: I0121 11:21:31.886971 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:31 crc kubenswrapper[4925]: I0121 11:21:31.896341 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.009365 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e7f680c-2b41-48df-b73e-815b8afec52a-operator-scripts\") pod \"1e7f680c-2b41-48df-b73e-815b8afec52a\" (UID: \"1e7f680c-2b41-48df-b73e-815b8afec52a\") " Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.009450 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4t8mt\" (UniqueName: \"kubernetes.io/projected/37a2f6c8-08e8-4000-84f6-5d639c2def77-kube-api-access-4t8mt\") pod \"37a2f6c8-08e8-4000-84f6-5d639c2def77\" (UID: \"37a2f6c8-08e8-4000-84f6-5d639c2def77\") " Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.009512 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37a2f6c8-08e8-4000-84f6-5d639c2def77-operator-scripts\") pod \"37a2f6c8-08e8-4000-84f6-5d639c2def77\" (UID: \"37a2f6c8-08e8-4000-84f6-5d639c2def77\") " Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.009556 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwwg\" (UniqueName: \"kubernetes.io/projected/1e7f680c-2b41-48df-b73e-815b8afec52a-kube-api-access-kfwwg\") pod \"1e7f680c-2b41-48df-b73e-815b8afec52a\" (UID: \"1e7f680c-2b41-48df-b73e-815b8afec52a\") " Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.010377 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e7f680c-2b41-48df-b73e-815b8afec52a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1e7f680c-2b41-48df-b73e-815b8afec52a" (UID: "1e7f680c-2b41-48df-b73e-815b8afec52a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.010419 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37a2f6c8-08e8-4000-84f6-5d639c2def77-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37a2f6c8-08e8-4000-84f6-5d639c2def77" (UID: "37a2f6c8-08e8-4000-84f6-5d639c2def77"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.016793 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37a2f6c8-08e8-4000-84f6-5d639c2def77-kube-api-access-4t8mt" (OuterVolumeSpecName: "kube-api-access-4t8mt") pod "37a2f6c8-08e8-4000-84f6-5d639c2def77" (UID: "37a2f6c8-08e8-4000-84f6-5d639c2def77"). InnerVolumeSpecName "kube-api-access-4t8mt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.027832 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e7f680c-2b41-48df-b73e-815b8afec52a-kube-api-access-kfwwg" (OuterVolumeSpecName: "kube-api-access-kfwwg") pod "1e7f680c-2b41-48df-b73e-815b8afec52a" (UID: "1e7f680c-2b41-48df-b73e-815b8afec52a"). InnerVolumeSpecName "kube-api-access-kfwwg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.111772 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37a2f6c8-08e8-4000-84f6-5d639c2def77-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.111821 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwwg\" (UniqueName: \"kubernetes.io/projected/1e7f680c-2b41-48df-b73e-815b8afec52a-kube-api-access-kfwwg\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.111836 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e7f680c-2b41-48df-b73e-815b8afec52a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.111845 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4t8mt\" (UniqueName: \"kubernetes.io/projected/37a2f6c8-08e8-4000-84f6-5d639c2def77-kube-api-access-4t8mt\") on node \"crc\" DevicePath \"\"" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.360273 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-mpckb" event={"ID":"1e7f680c-2b41-48df-b73e-815b8afec52a","Type":"ContainerDied","Data":"2e1edb4c88e17ebf2e9ce46cc5808ec175074120073e12ff28dafa4ad1d77796"} Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.360321 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e1edb4c88e17ebf2e9ce46cc5808ec175074120073e12ff28dafa4ad1d77796" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.360337 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-mpckb" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.362433 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" event={"ID":"37a2f6c8-08e8-4000-84f6-5d639c2def77","Type":"ContainerDied","Data":"e339ac91baad2cc13244cfc908d72f33df8899fbd1a97d0a3e64132cf6706d68"} Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.362493 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e339ac91baad2cc13244cfc908d72f33df8899fbd1a97d0a3e64132cf6706d68" Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.362570 4925 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.360273 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-mpckb" event={"ID":"1e7f680c-2b41-48df-b73e-815b8afec52a","Type":"ContainerDied","Data":"2e1edb4c88e17ebf2e9ce46cc5808ec175074120073e12ff28dafa4ad1d77796"}
Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.360321 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e1edb4c88e17ebf2e9ce46cc5808ec175074120073e12ff28dafa4ad1d77796"
Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.360337 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-mpckb"
Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.362433 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m" event={"ID":"37a2f6c8-08e8-4000-84f6-5d639c2def77","Type":"ContainerDied","Data":"e339ac91baad2cc13244cfc908d72f33df8899fbd1a97d0a3e64132cf6706d68"}
Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.362493 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e339ac91baad2cc13244cfc908d72f33df8899fbd1a97d0a3e64132cf6706d68"
Jan 21 11:21:32 crc kubenswrapper[4925]: I0121 11:21:32.362570 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m"
Jan 21 11:21:33 crc kubenswrapper[4925]: I0121 11:21:33.281182 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lhq4n"
Jan 21 11:21:33 crc kubenswrapper[4925]: I0121 11:21:33.281585 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lhq4n"
Jan 21 11:21:33 crc kubenswrapper[4925]: I0121 11:21:33.333476 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lhq4n"
Jan 21 11:21:33 crc kubenswrapper[4925]: I0121 11:21:33.373327 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerStarted","Data":"90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c"}
Jan 21 11:21:33 crc kubenswrapper[4925]: I0121 11:21:33.373471 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:21:33 crc kubenswrapper[4925]: I0121 11:21:33.409075 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.622930496 podStartE2EDuration="8.409049437s" podCreationTimestamp="2026-01-21 11:21:25 +0000 UTC" firstStartedPulling="2026-01-21 11:21:26.634527842 +0000 UTC m=+1578.238419776" lastFinishedPulling="2026-01-21 11:21:32.420646783 +0000 UTC m=+1584.024538717" observedRunningTime="2026-01-21 11:21:33.403175301 +0000 UTC m=+1585.007067235" watchObservedRunningTime="2026-01-21 11:21:33.409049437 +0000 UTC m=+1585.012941371"
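
The "Observed pod startup duration" entry above carries enough fields to check its own arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (8.409049437s), and podStartSLOduration appears to be that interval minus the image-pull window lastFinishedPulling - firstStartedPulling (8.409049437 - 5.786118941 = 2.622930496), i.e. startup latency excluding pull time. A self-contained Go check against the logged ceilometer-0 timestamps; the SLO formula is inferred from these numbers, not quoted from kubelet source.

    package main

    import (
        "fmt"
        "time"
    )

    // Layout matching the timestamps printed by pod_startup_latency_tracker.
    const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

    func mustParse(s string) time.Time {
        t, err := time.Parse(layout, s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2026-01-21 11:21:25 +0000 UTC")             // podCreationTimestamp
        firstPull := mustParse("2026-01-21 11:21:26.634527842 +0000 UTC") // firstStartedPulling
        lastPull := mustParse("2026-01-21 11:21:32.420646783 +0000 UTC")  // lastFinishedPulling
        running := mustParse("2026-01-21 11:21:33.409049437 +0000 UTC")   // watchObservedRunningTime

        e2e := running.Sub(created)          // podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull) // assumed: E2E minus the image-pull window

        fmt.Println(e2e) // 8.409049437s, matching the entry above
        fmt.Println(slo) // 2.622930496s, matching podStartSLOduration
    }
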
Jan 21 11:21:33 crc kubenswrapper[4925]: I0121 11:21:33.419879 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lhq4n"
Jan 21 11:21:34 crc kubenswrapper[4925]: I0121 11:21:34.598829 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/kube-state-metrics-0"
Jan 21 11:21:36 crc kubenswrapper[4925]: I0121 11:21:36.927847 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lhq4n"]
Jan 21 11:21:36 crc kubenswrapper[4925]: I0121 11:21:36.928386 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lhq4n" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" containerName="registry-server" containerID="cri-o://5aaa4ff8321e9b754a751cd0dee72534902cc36e45dd13c02859591cba089159" gracePeriod=2
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.049700 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"]
Jan 21 11:21:37 crc kubenswrapper[4925]: E0121 11:21:37.050427 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e7f680c-2b41-48df-b73e-815b8afec52a" containerName="mariadb-database-create"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.050532 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e7f680c-2b41-48df-b73e-815b8afec52a" containerName="mariadb-database-create"
Jan 21 11:21:37 crc kubenswrapper[4925]: E0121 11:21:37.050630 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a2f6c8-08e8-4000-84f6-5d639c2def77" containerName="mariadb-account-create-update"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.050706 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a2f6c8-08e8-4000-84f6-5d639c2def77" containerName="mariadb-account-create-update"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.051015 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="37a2f6c8-08e8-4000-84f6-5d639c2def77" containerName="mariadb-account-create-update"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.051121 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e7f680c-2b41-48df-b73e-815b8afec52a" containerName="mariadb-database-create"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.051980 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.054344 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.055267 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-tkv9d"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.068350 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"]
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.099569 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-db-sync-config-data\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.099772 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2l2s\" (UniqueName: \"kubernetes.io/projected/0fc9684a-4c42-45e5-91af-6935ebff149d-kube-api-access-n2l2s\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.099908 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.100018 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-config-data\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.201533 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.201995 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-config-data\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.202100 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-db-sync-config-data\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.202179 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2l2s\" (UniqueName: \"kubernetes.io/projected/0fc9684a-4c42-45e5-91af-6935ebff149d-kube-api-access-n2l2s\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.209324 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-db-sync-config-data\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.209539 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.209799 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-config-data\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.220078 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2l2s\" (UniqueName: \"kubernetes.io/projected/0fc9684a-4c42-45e5-91af-6935ebff149d-kube-api-access-n2l2s\") pod \"watcher-kuttl-db-sync-qhxk8\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.403689 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.418196 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lhq4n"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.418648 4925 generic.go:334] "Generic (PLEG): container finished" podID="1bb05265-9c05-42cf-84bb-772dc4393057" containerID="5aaa4ff8321e9b754a751cd0dee72534902cc36e45dd13c02859591cba089159" exitCode=0
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.418686 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lhq4n" event={"ID":"1bb05265-9c05-42cf-84bb-772dc4393057","Type":"ContainerDied","Data":"5aaa4ff8321e9b754a751cd0dee72534902cc36e45dd13c02859591cba089159"}
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.418712 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lhq4n" event={"ID":"1bb05265-9c05-42cf-84bb-772dc4393057","Type":"ContainerDied","Data":"d729da01591fcbc75a8455e24fd2a52303bce2a77949b6642163696e7ebe9727"}
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.418723 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d729da01591fcbc75a8455e24fd2a52303bce2a77949b6642163696e7ebe9727"
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.610918 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdb7x\" (UniqueName: \"kubernetes.io/projected/1bb05265-9c05-42cf-84bb-772dc4393057-kube-api-access-jdb7x\") pod \"1bb05265-9c05-42cf-84bb-772dc4393057\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") "
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.611317 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-utilities\") pod \"1bb05265-9c05-42cf-84bb-772dc4393057\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") "
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.611511 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-catalog-content\") pod \"1bb05265-9c05-42cf-84bb-772dc4393057\" (UID: \"1bb05265-9c05-42cf-84bb-772dc4393057\") "
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.618967 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bb05265-9c05-42cf-84bb-772dc4393057-kube-api-access-jdb7x" (OuterVolumeSpecName: "kube-api-access-jdb7x") pod "1bb05265-9c05-42cf-84bb-772dc4393057" (UID: "1bb05265-9c05-42cf-84bb-772dc4393057"). InnerVolumeSpecName "kube-api-access-jdb7x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.632578 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-utilities" (OuterVolumeSpecName: "utilities") pod "1bb05265-9c05-42cf-84bb-772dc4393057" (UID: "1bb05265-9c05-42cf-84bb-772dc4393057"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.713929 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.714185 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdb7x\" (UniqueName: \"kubernetes.io/projected/1bb05265-9c05-42cf-84bb-772dc4393057-kube-api-access-jdb7x\") on node \"crc\" DevicePath \"\""
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.782926 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1bb05265-9c05-42cf-84bb-772dc4393057" (UID: "1bb05265-9c05-42cf-84bb-772dc4393057"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.818527 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb05265-9c05-42cf-84bb-772dc4393057-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 11:21:37 crc kubenswrapper[4925]: I0121 11:21:37.950762 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"]
Jan 21 11:21:37 crc kubenswrapper[4925]: W0121 11:21:37.955897 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fc9684a_4c42_45e5_91af_6935ebff149d.slice/crio-d097537355a3a0527a8be188f1ae50fb1f372c522a130196af03174c1e38a682 WatchSource:0}: Error finding container d097537355a3a0527a8be188f1ae50fb1f372c522a130196af03174c1e38a682: Status 404 returned error can't find the container with id d097537355a3a0527a8be188f1ae50fb1f372c522a130196af03174c1e38a682
Jan 21 11:21:38 crc kubenswrapper[4925]: I0121 11:21:38.449080 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8" event={"ID":"0fc9684a-4c42-45e5-91af-6935ebff149d","Type":"ContainerStarted","Data":"d097537355a3a0527a8be188f1ae50fb1f372c522a130196af03174c1e38a682"}
Jan 21 11:21:38 crc kubenswrapper[4925]: I0121 11:21:38.449111 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lhq4n"
Jan 21 11:21:38 crc kubenswrapper[4925]: I0121 11:21:38.509531 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lhq4n"]
Jan 21 11:21:38 crc kubenswrapper[4925]: I0121 11:21:38.519147 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lhq4n"]
Jan 21 11:21:39 crc kubenswrapper[4925]: I0121 11:21:39.517918 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" path="/var/lib/kubelet/pods/1bb05265-9c05-42cf-84bb-772dc4393057/volumes"
Jan 21 11:21:56 crc kubenswrapper[4925]: I0121 11:21:56.045217 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:00 crc kubenswrapper[4925]: E0121 11:22:00.726840 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.182:5001/podified-master-centos10/openstack-watcher-api:watcher_latest"
Jan 21 11:22:00 crc kubenswrapper[4925]: E0121 11:22:00.729558 4925 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.182:5001/podified-master-centos10/openstack-watcher-api:watcher_latest"
Jan 21 11:22:00 crc kubenswrapper[4925]: E0121 11:22:00.729869 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-kuttl-db-sync,Image:38.102.83.182:5001/podified-master-centos10/openstack-watcher-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/watcher/watcher.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n2l2s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-kuttl-db-sync-qhxk8_watcher-kuttl-default(0fc9684a-4c42-45e5-91af-6935ebff149d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 11:22:00 crc kubenswrapper[4925]: E0121 11:22:00.731061 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-kuttl-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8" podUID="0fc9684a-4c42-45e5-91af-6935ebff149d"
Jan 21 11:22:00 crc kubenswrapper[4925]: E0121 11:22:00.974522 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-kuttl-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.182:5001/podified-master-centos10/openstack-watcher-api:watcher_latest\\\"\"" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8" podUID="0fc9684a-4c42-45e5-91af-6935ebff149d"
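
The ErrImagePull followed by ImagePullBackOff above is the usual sequence when a pull fails: the sync loop records the pull error, and subsequent retries are rate-limited by an exponential backoff until a later pull succeeds (here at 11:22:14, about 37 seconds after the failure). The Go sketch below models that doubling backoff; the initial delay and cap are illustrative assumptions scaled down for a quick run, not values read from this log.

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // pullWithBackoff retries pull() with a doubling delay, capped at max.
    // The 10ms/300ms values stand in for the ~10s initial / minutes-scale cap
    // range kubelet uses; they are assumptions for illustration.
    func pullWithBackoff(pull func() error) {
        delay, max := 10*time.Millisecond, 300*time.Millisecond
        for attempt := 1; ; attempt++ {
            err := pull()
            if err == nil {
                fmt.Printf("attempt %d: ContainerStarted\n", attempt)
                return
            }
            fmt.Printf("attempt %d: ErrImagePull: %v; ImagePullBackOff for %s\n", attempt, err, delay)
            time.Sleep(delay) // kubelet tracks the backoff per image rather than blocking
            if delay *= 2; delay > max {
                delay = max
            }
        }
    }

    func main() {
        failures := 2 // fail a couple of times, then succeed, like the qhxk8 pull above
        pullWithBackoff(func() error {
            if failures > 0 {
                failures--
                return errors.New("rpc error: code = Canceled desc = copying config: context canceled")
            }
            return nil
        })
    }
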
Jan 21 11:22:15 crc kubenswrapper[4925]: I0121 11:22:15.115763 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8" event={"ID":"0fc9684a-4c42-45e5-91af-6935ebff149d","Type":"ContainerStarted","Data":"9ca41a9bed7c069da2e84f8de9cb4942c4c3e2eaaa352437ea8573ded9caa68e"}
Jan 21 11:22:15 crc kubenswrapper[4925]: I0121 11:22:15.141876 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8" podStartSLOduration=1.520691447 podStartE2EDuration="38.141835849s" podCreationTimestamp="2026-01-21 11:21:37 +0000 UTC" firstStartedPulling="2026-01-21 11:21:37.958439791 +0000 UTC m=+1589.562331725" lastFinishedPulling="2026-01-21 11:22:14.579584193 +0000 UTC m=+1626.183476127" observedRunningTime="2026-01-21 11:22:15.133520195 +0000 UTC m=+1626.737412159" watchObservedRunningTime="2026-01-21 11:22:15.141835849 +0000 UTC m=+1626.745727783"
Jan 21 11:22:18 crc kubenswrapper[4925]: I0121 11:22:18.142066 4925 generic.go:334] "Generic (PLEG): container finished" podID="0fc9684a-4c42-45e5-91af-6935ebff149d" containerID="9ca41a9bed7c069da2e84f8de9cb4942c4c3e2eaaa352437ea8573ded9caa68e" exitCode=0
Jan 21 11:22:18 crc kubenswrapper[4925]: I0121 11:22:18.142175 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8" event={"ID":"0fc9684a-4c42-45e5-91af-6935ebff149d","Type":"ContainerDied","Data":"9ca41a9bed7c069da2e84f8de9cb4942c4c3e2eaaa352437ea8573ded9caa68e"}
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.539265 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.720650 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-config-data\") pod \"0fc9684a-4c42-45e5-91af-6935ebff149d\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") "
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.720825 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-combined-ca-bundle\") pod \"0fc9684a-4c42-45e5-91af-6935ebff149d\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") "
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.720931 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2l2s\" (UniqueName: \"kubernetes.io/projected/0fc9684a-4c42-45e5-91af-6935ebff149d-kube-api-access-n2l2s\") pod \"0fc9684a-4c42-45e5-91af-6935ebff149d\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") "
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.720987 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-db-sync-config-data\") pod \"0fc9684a-4c42-45e5-91af-6935ebff149d\" (UID: \"0fc9684a-4c42-45e5-91af-6935ebff149d\") "
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.728275 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fc9684a-4c42-45e5-91af-6935ebff149d-kube-api-access-n2l2s" (OuterVolumeSpecName: "kube-api-access-n2l2s") pod "0fc9684a-4c42-45e5-91af-6935ebff149d" (UID: "0fc9684a-4c42-45e5-91af-6935ebff149d"). InnerVolumeSpecName "kube-api-access-n2l2s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.733672 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0fc9684a-4c42-45e5-91af-6935ebff149d" (UID: "0fc9684a-4c42-45e5-91af-6935ebff149d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.748650 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0fc9684a-4c42-45e5-91af-6935ebff149d" (UID: "0fc9684a-4c42-45e5-91af-6935ebff149d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.773531 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-config-data" (OuterVolumeSpecName: "config-data") pod "0fc9684a-4c42-45e5-91af-6935ebff149d" (UID: "0fc9684a-4c42-45e5-91af-6935ebff149d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.824346 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2l2s\" (UniqueName: \"kubernetes.io/projected/0fc9684a-4c42-45e5-91af-6935ebff149d-kube-api-access-n2l2s\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.824413 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.824426 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:19 crc kubenswrapper[4925]: I0121 11:22:19.824435 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fc9684a-4c42-45e5-91af-6935ebff149d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.166819 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8" event={"ID":"0fc9684a-4c42-45e5-91af-6935ebff149d","Type":"ContainerDied","Data":"d097537355a3a0527a8be188f1ae50fb1f372c522a130196af03174c1e38a682"}
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.166872 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d097537355a3a0527a8be188f1ae50fb1f372c522a130196af03174c1e38a682"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.166885 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"
Jan 21 11:22:20 crc kubenswrapper[4925]: E0121 11:22:20.307618 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fc9684a_4c42_45e5_91af_6935ebff149d.slice/crio-d097537355a3a0527a8be188f1ae50fb1f372c522a130196af03174c1e38a682\": RecentStats: unable to find data in memory cache]"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.489153 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:22:20 crc kubenswrapper[4925]: E0121 11:22:20.489967 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" containerName="extract-utilities"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.490000 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" containerName="extract-utilities"
Jan 21 11:22:20 crc kubenswrapper[4925]: E0121 11:22:20.490026 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fc9684a-4c42-45e5-91af-6935ebff149d" containerName="watcher-kuttl-db-sync"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.490033 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fc9684a-4c42-45e5-91af-6935ebff149d" containerName="watcher-kuttl-db-sync"
Jan 21 11:22:20 crc kubenswrapper[4925]: E0121 11:22:20.490050 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" containerName="registry-server"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.490056 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" containerName="registry-server"
Jan 21 11:22:20 crc kubenswrapper[4925]: E0121 11:22:20.490071 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" containerName="extract-content"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.490077 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" containerName="extract-content"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.490240 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bb05265-9c05-42cf-84bb-772dc4393057" containerName="registry-server"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.490265 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fc9684a-4c42-45e5-91af-6935ebff149d" containerName="watcher-kuttl-db-sync"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.491276 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.496994 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.501471 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.502709 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.507226 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.517221 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.517642 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-tkv9d"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.540815 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.610233 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.611489 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.614620 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.621944 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636053 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scm69\" (UniqueName: \"kubernetes.io/projected/9b8cf236-9a90-4cd3-94c6-8e8212205272-kube-api-access-scm69\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636142 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b214982c-972d-4f95-a1fe-0a6e598b2889-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636167 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636226 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636258 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djn8p\" (UniqueName: \"kubernetes.io/projected/b214982c-972d-4f95-a1fe-0a6e598b2889-kube-api-access-djn8p\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636320 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636363 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636386 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b8cf236-9a90-4cd3-94c6-8e8212205272-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636422 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.636448 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738422 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/222d27db-f02b-4c4c-a036-b260d90cfee9-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738502 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scm69\" (UniqueName: \"kubernetes.io/projected/9b8cf236-9a90-4cd3-94c6-8e8212205272-kube-api-access-scm69\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738553 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b214982c-972d-4f95-a1fe-0a6e598b2889-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738590 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738635 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p6bm\" (UniqueName: \"kubernetes.io/projected/222d27db-f02b-4c4c-a036-b260d90cfee9-kube-api-access-8p6bm\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738657 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738679 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738699 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djn8p\" (UniqueName: \"kubernetes.io/projected/b214982c-972d-4f95-a1fe-0a6e598b2889-kube-api-access-djn8p\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738745 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738773 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738790 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b8cf236-9a90-4cd3-94c6-8e8212205272-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738804 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738835 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.738852 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.739323 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b214982c-972d-4f95-a1fe-0a6e598b2889-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.740063 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b8cf236-9a90-4cd3-94c6-8e8212205272-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.745063 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.745988 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.746567 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.749254 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.754921 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.760501 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.763832 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scm69\" (UniqueName: \"kubernetes.io/projected/9b8cf236-9a90-4cd3-94c6-8e8212205272-kube-api-access-scm69\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.767278 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djn8p\" (UniqueName: \"kubernetes.io/projected/b214982c-972d-4f95-a1fe-0a6e598b2889-kube-api-access-djn8p\") pod \"watcher-kuttl-api-0\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.807069 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.818429 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.841113 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.841346 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.841416 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/222d27db-f02b-4c4c-a036-b260d90cfee9-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.841605 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p6bm\" (UniqueName: \"kubernetes.io/projected/222d27db-f02b-4c4c-a036-b260d90cfee9-kube-api-access-8p6bm\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.843915 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/222d27db-f02b-4c4c-a036-b260d90cfee9-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.847908 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.848626 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.872319 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p6bm\" (UniqueName: \"kubernetes.io/projected/222d27db-f02b-4c4c-a036-b260d90cfee9-kube-api-access-8p6bm\") pod \"watcher-kuttl-applier-0\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:20 crc kubenswrapper[4925]: I0121 11:22:20.940924 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:21 crc kubenswrapper[4925]: I0121 11:22:21.195084 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:22:21 crc kubenswrapper[4925]: I0121 11:22:21.432619 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Jan 21 11:22:21 crc kubenswrapper[4925]: W0121 11:22:21.432713 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b8cf236_9a90_4cd3_94c6_8e8212205272.slice/crio-23e44d8c24bf3f1a6a87f6934c7b959a4e371b1eee82f52e03392ae71c96d66d WatchSource:0}: Error finding container 23e44d8c24bf3f1a6a87f6934c7b959a4e371b1eee82f52e03392ae71c96d66d: Status 404 returned error can't find the container with id 23e44d8c24bf3f1a6a87f6934c7b959a4e371b1eee82f52e03392ae71c96d66d
Jan 21 11:22:21 crc kubenswrapper[4925]: I0121 11:22:21.578350 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:22:22 crc kubenswrapper[4925]: I0121 11:22:22.200627 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b214982c-972d-4f95-a1fe-0a6e598b2889","Type":"ContainerStarted","Data":"17bd771cf66aa9d456160042d57a91c6441aadbab8a31570033823e240685ce6"}
Jan 21 11:22:22 crc kubenswrapper[4925]: I0121 11:22:22.200933 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b214982c-972d-4f95-a1fe-0a6e598b2889","Type":"ContainerStarted","Data":"f333c4c5f26da16d3b1cf9dcce5f8e957a6a30bcfa3ab6fb205c5025d790034d"}
Jan 21 11:22:22 crc kubenswrapper[4925]: I0121 11:22:22.209696 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"222d27db-f02b-4c4c-a036-b260d90cfee9","Type":"ContainerStarted","Data":"415d4a9664a2bffe6c217b252d7539a791a656e03aa399efe1403be72e061f5c"}
Jan 21 11:22:22 crc kubenswrapper[4925]: I0121 11:22:22.210981 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"9b8cf236-9a90-4cd3-94c6-8e8212205272","Type":"ContainerStarted","Data":"23e44d8c24bf3f1a6a87f6934c7b959a4e371b1eee82f52e03392ae71c96d66d"}
Jan 21 11:22:23 crc kubenswrapper[4925]: I0121 11:22:23.219942 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b214982c-972d-4f95-a1fe-0a6e598b2889","Type":"ContainerStarted","Data":"4d7394ab1907a2e6f4f6c5c6034fb06a9f272f2cda72fb6279c59a67473d5cdd"}
Jan 21 11:22:23 crc kubenswrapper[4925]: I0121 11:22:23.220591 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:23 crc kubenswrapper[4925]: I0121 11:22:23.247586 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=3.247561799 podStartE2EDuration="3.247561799s" podCreationTimestamp="2026-01-21 11:22:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:22:23.241805806 +0000 UTC m=+1634.845697740" watchObservedRunningTime="2026-01-21 11:22:23.247561799 +0000 UTC m=+1634.851453733"
Jan 21 11:22:24 crc kubenswrapper[4925]: I0121 11:22:24.231485 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"222d27db-f02b-4c4c-a036-b260d90cfee9","Type":"ContainerStarted","Data":"c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7"}
Jan 21 11:22:24 crc kubenswrapper[4925]: I0121 11:22:24.235212 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"9b8cf236-9a90-4cd3-94c6-8e8212205272","Type":"ContainerStarted","Data":"3ab653f115f86e28435d88950751d0aebbeef3030dadaf407aac199a46f95ab6"}
Jan 21 11:22:24 crc kubenswrapper[4925]: I0121 11:22:24.249435 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.851207964 podStartE2EDuration="4.249412426s" podCreationTimestamp="2026-01-21 11:22:20 +0000 UTC" firstStartedPulling="2026-01-21 11:22:21.582699734 +0000 UTC m=+1633.186591668" lastFinishedPulling="2026-01-21 11:22:22.980904205 +0000 UTC m=+1634.584796130" observedRunningTime="2026-01-21 11:22:24.247757403 +0000 UTC m=+1635.851649337" watchObservedRunningTime="2026-01-21 11:22:24.249412426 +0000 UTC m=+1635.853304370"
Jan 21 11:22:24 crc kubenswrapper[4925]: I0121 11:22:24.265000 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.712967391 podStartE2EDuration="4.26497777s" podCreationTimestamp="2026-01-21 11:22:20 +0000 UTC" firstStartedPulling="2026-01-21 11:22:21.434871606 +0000 UTC m=+1633.038763540" lastFinishedPulling="2026-01-21 11:22:22.986881975 +0000 UTC m=+1634.590773919" observedRunningTime="2026-01-21 11:22:24.264688851 +0000 UTC m=+1635.868580785" watchObservedRunningTime="2026-01-21 11:22:24.26497777 +0000 UTC m=+1635.868869704"
Jan 21 11:22:25 crc kubenswrapper[4925]: I0121 11:22:25.807269 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:25 crc kubenswrapper[4925]: I0121 11:22:25.807437 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 21 11:22:25 crc kubenswrapper[4925]: I0121 11:22:25.941587 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:26 crc kubenswrapper[4925]: I0121 11:22:26.301345 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:30 crc kubenswrapper[4925]: I0121 11:22:30.807453 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:30 crc kubenswrapper[4925]: I0121 11:22:30.812287 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:30 crc kubenswrapper[4925]: I0121 11:22:30.819447 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:30 crc kubenswrapper[4925]: I0121 11:22:30.855874 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:30 crc kubenswrapper[4925]: I0121 11:22:30.941420 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:30 crc kubenswrapper[4925]: I0121 11:22:30.968490 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:31 crc kubenswrapper[4925]: I0121 11:22:31.298062 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:31 crc kubenswrapper[4925]: I0121 11:22:31.304718 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:31 crc kubenswrapper[4925]: I0121 11:22:31.324718 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:31 crc kubenswrapper[4925]: I0121 11:22:31.326466 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.698828 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.699680 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="ceilometer-central-agent" containerID="cri-o://3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07" gracePeriod=30
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.699759 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="proxy-httpd" containerID="cri-o://90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c" gracePeriod=30
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.699786 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="ceilometer-notification-agent" containerID="cri-o://fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1" gracePeriod=30
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.699793 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="sg-core" containerID="cri-o://78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46" gracePeriod=30
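
Each "Killing container with a grace period" entry above corresponds to a TERM-then-KILL shutdown: the runtime signals the container, waits up to gracePeriod seconds (30 for these ceilometer containers, 2 for the registry-server earlier), then escalates to a hard kill. A self-contained Go sketch of that escalation, using a local sleep process as a stand-in for a container (Unix-only; 2s stands in for the logged gracePeriod=30):

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    // killWithGracePeriod mirrors the behavior behind the entries above:
    // SIGTERM first, wait up to the grace period, then SIGKILL.
    func killWithGracePeriod(cmd *exec.Cmd, grace time.Duration) {
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()

        cmd.Process.Signal(syscall.SIGTERM)
        select {
        case <-done:
            fmt.Println("container exited within grace period")
        case <-time.After(grace):
            cmd.Process.Kill() // escalate once the grace period expires
            <-done
            fmt.Println("container killed after grace period expired")
        }
    }

    func main() {
        cmd := exec.Command("sleep", "60") // stand-in for a container process
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        killWithGracePeriod(cmd, 2*time.Second)
    }
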
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.874489 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"]
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.889380 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-qhxk8"]
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.977795 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Jan 21 11:22:33 crc kubenswrapper[4925]: I0121 11:22:33.998882 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher7bc3-account-delete-n45h4"]
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.006612 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.018490 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher7bc3-account-delete-n45h4"]
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.034221 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.034767 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="222d27db-f02b-4c4c-a036-b260d90cfee9" containerName="watcher-applier" containerID="cri-o://c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7" gracePeriod=30
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.094904 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b92vs\" (UniqueName: \"kubernetes.io/projected/c382040c-46ae-4608-8bd0-8d95fcd31ee1-kube-api-access-b92vs\") pod \"watcher7bc3-account-delete-n45h4\" (UID: \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\") " pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.095033 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c382040c-46ae-4608-8bd0-8d95fcd31ee1-operator-scripts\") pod \"watcher7bc3-account-delete-n45h4\" (UID: \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\") " pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.104382 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.104718 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-kuttl-api-log" containerID="cri-o://17bd771cf66aa9d456160042d57a91c6441aadbab8a31570033823e240685ce6" gracePeriod=30
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.105335 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-api" containerID="cri-o://4d7394ab1907a2e6f4f6c5c6034fb06a9f272f2cda72fb6279c59a67473d5cdd" gracePeriod=30
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.196691 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c382040c-46ae-4608-8bd0-8d95fcd31ee1-operator-scripts\") pod \"watcher7bc3-account-delete-n45h4\" (UID: \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\") " pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.196815 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b92vs\" (UniqueName: \"kubernetes.io/projected/c382040c-46ae-4608-8bd0-8d95fcd31ee1-kube-api-access-b92vs\") pod \"watcher7bc3-account-delete-n45h4\" (UID: \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\") " pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.197713 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c382040c-46ae-4608-8bd0-8d95fcd31ee1-operator-scripts\") pod \"watcher7bc3-account-delete-n45h4\" (UID: \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\") " pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.228102 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b92vs\" (UniqueName: \"kubernetes.io/projected/c382040c-46ae-4608-8bd0-8d95fcd31ee1-kube-api-access-b92vs\") pod \"watcher7bc3-account-delete-n45h4\" (UID: \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\") " pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.351308 4925 generic.go:334] "Generic (PLEG): container finished" podID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerID="17bd771cf66aa9d456160042d57a91c6441aadbab8a31570033823e240685ce6" exitCode=143
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.351600 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b214982c-972d-4f95-a1fe-0a6e598b2889","Type":"ContainerDied","Data":"17bd771cf66aa9d456160042d57a91c6441aadbab8a31570033823e240685ce6"}
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.369039 4925 generic.go:334] "Generic (PLEG): container finished" podID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerID="90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c" exitCode=0
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.369089 4925 generic.go:334] "Generic (PLEG): container finished" podID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerID="78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46" exitCode=2
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.369353 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="9b8cf236-9a90-4cd3-94c6-8e8212205272" containerName="watcher-decision-engine" containerID="cri-o://3ab653f115f86e28435d88950751d0aebbeef3030dadaf407aac199a46f95ab6" gracePeriod=30
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.369906 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerDied","Data":"90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c"}
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.369960 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerDied","Data":"78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46"}
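Each PLEG pair above reads the same way: a "Generic (PLEG): container finished" line carries the exit code (143 = 128+SIGTERM for the api log sidecar; 0 and 2 for the ceilometer containers), and the matching "SyncLoop (PLEG): event for pod" line ties that container ID to the pod UID. A throwaway parser for tabulating these transitions from a capture like this one (field layout assumed from this log, not a stable kubelet interface):

```go
package main

import (
	"fmt"
	"regexp"
)

// plegRe pulls pod name, pod UID, transition type, and container ID out
// of a "SyncLoop (PLEG)" line as it appears in this capture.
var plegRe = regexp.MustCompile(
	`pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"([^"]+)","Data":"([0-9a-f]+)"\}`)

func main() {
	line := `I0121 11:22:34.351600 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b214982c-972d-4f95-a1fe-0a6e598b2889","Type":"ContainerDied","Data":"17bd771cf66aa9d456160042d57a91c6441aadbab8a31570033823e240685ce6"}`
	if m := plegRe.FindStringSubmatch(line); m != nil {
		fmt.Printf("pod=%s uid=%s type=%s container=%s\n", m[1], m[2], m[3], m[4])
	}
}
```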
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.378719 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:34 crc kubenswrapper[4925]: I0121 11:22:34.913188 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher7bc3-account-delete-n45h4"]
Jan 21 11:22:35 crc kubenswrapper[4925]: I0121 11:22:35.381367 4925 generic.go:334] "Generic (PLEG): container finished" podID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerID="4d7394ab1907a2e6f4f6c5c6034fb06a9f272f2cda72fb6279c59a67473d5cdd" exitCode=0
Jan 21 11:22:35 crc kubenswrapper[4925]: I0121 11:22:35.381607 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b214982c-972d-4f95-a1fe-0a6e598b2889","Type":"ContainerDied","Data":"4d7394ab1907a2e6f4f6c5c6034fb06a9f272f2cda72fb6279c59a67473d5cdd"}
Jan 21 11:22:35 crc kubenswrapper[4925]: I0121 11:22:35.384659 4925 generic.go:334] "Generic (PLEG): container finished" podID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerID="3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07" exitCode=0
Jan 21 11:22:35 crc kubenswrapper[4925]: I0121 11:22:35.384714 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerDied","Data":"3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07"}
Jan 21 11:22:35 crc kubenswrapper[4925]: I0121 11:22:35.395548 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4" event={"ID":"c382040c-46ae-4608-8bd0-8d95fcd31ee1","Type":"ContainerStarted","Data":"6ed7159ce6cb22c57d4b99a06fba9688ce7c2a9ffe7957d47a543652c850dd2a"}
Jan 21 11:22:35 crc kubenswrapper[4925]: I0121 11:22:35.511771 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fc9684a-4c42-45e5-91af-6935ebff149d" path="/var/lib/kubelet/pods/0fc9684a-4c42-45e5-91af-6935ebff149d/volumes"
Jan 21 11:22:35 crc kubenswrapper[4925]: E0121 11:22:35.942571 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7 is running failed: container process not found" containerID="c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Jan 21 11:22:35 crc kubenswrapper[4925]: E0121 11:22:35.943066 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7 is running failed: container process not found" containerID="c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Jan 21 11:22:35 crc kubenswrapper[4925]: E0121 11:22:35.944898 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7 is running failed: container process not found" containerID="c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Jan 21 11:22:35 crc kubenswrapper[4925]: E0121 11:22:35.944956 4925 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7 is running failed: container process not found" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="222d27db-f02b-4c4c-a036-b260d90cfee9" containerName="watcher-applier"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.058123 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.131092 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-config-data\") pod \"b214982c-972d-4f95-a1fe-0a6e598b2889\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") "
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.131208 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-combined-ca-bundle\") pod \"b214982c-972d-4f95-a1fe-0a6e598b2889\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") "
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.131338 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djn8p\" (UniqueName: \"kubernetes.io/projected/b214982c-972d-4f95-a1fe-0a6e598b2889-kube-api-access-djn8p\") pod \"b214982c-972d-4f95-a1fe-0a6e598b2889\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") "
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.131367 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b214982c-972d-4f95-a1fe-0a6e598b2889-logs\") pod \"b214982c-972d-4f95-a1fe-0a6e598b2889\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") "
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.131484 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-custom-prometheus-ca\") pod \"b214982c-972d-4f95-a1fe-0a6e598b2889\" (UID: \"b214982c-972d-4f95-a1fe-0a6e598b2889\") "
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.140973 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b214982c-972d-4f95-a1fe-0a6e598b2889-logs" (OuterVolumeSpecName: "logs") pod "b214982c-972d-4f95-a1fe-0a6e598b2889" (UID: "b214982c-972d-4f95-a1fe-0a6e598b2889"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.171619 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b214982c-972d-4f95-a1fe-0a6e598b2889-kube-api-access-djn8p" (OuterVolumeSpecName: "kube-api-access-djn8p") pod "b214982c-972d-4f95-a1fe-0a6e598b2889" (UID: "b214982c-972d-4f95-a1fe-0a6e598b2889"). InnerVolumeSpecName "kube-api-access-djn8p". PluginName "kubernetes.io/projected", VolumeGidValue ""
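The three ExecSync failures above are the applier's exec readiness probe (/usr/bin/pgrep -r DRST watcher-applier) racing container teardown: the process is already gone, so the runtime answers with gRPC NotFound and the prober records an error rather than an ordinary failure. Code talking to a CRI endpoint typically special-cases that status code; a minimal sketch (the helper name is mine, not kubelet code):

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isContainerGone reports whether a CRI call failed only because the
// container already exited, as in the ExecSync errors above.
func isContainerGone(err error) bool {
	return status.Code(err) == codes.NotFound
}

func main() {
	gone := status.Error(codes.NotFound, "container is not created or running")
	fmt.Println(isContainerGone(gone))               // true: benign teardown race
	fmt.Println(isContainerGone(errors.New("boom"))) // false: a real failure
}
```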
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.223322 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "b214982c-972d-4f95-a1fe-0a6e598b2889" (UID: "b214982c-972d-4f95-a1fe-0a6e598b2889"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.260816 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djn8p\" (UniqueName: \"kubernetes.io/projected/b214982c-972d-4f95-a1fe-0a6e598b2889-kube-api-access-djn8p\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.260874 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b214982c-972d-4f95-a1fe-0a6e598b2889-logs\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.260892 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.268579 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b214982c-972d-4f95-a1fe-0a6e598b2889" (UID: "b214982c-972d-4f95-a1fe-0a6e598b2889"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.300636 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-config-data" (OuterVolumeSpecName: "config-data") pod "b214982c-972d-4f95-a1fe-0a6e598b2889" (UID: "b214982c-972d-4f95-a1fe-0a6e598b2889"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.363524 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.363561 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b214982c-972d-4f95-a1fe-0a6e598b2889-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.420116 4925 generic.go:334] "Generic (PLEG): container finished" podID="222d27db-f02b-4c4c-a036-b260d90cfee9" containerID="c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7" exitCode=0
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.420318 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"222d27db-f02b-4c4c-a036-b260d90cfee9","Type":"ContainerDied","Data":"c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7"}
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.423673 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.423902 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b214982c-972d-4f95-a1fe-0a6e598b2889","Type":"ContainerDied","Data":"f333c4c5f26da16d3b1cf9dcce5f8e957a6a30bcfa3ab6fb205c5025d790034d"}
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.424033 4925 scope.go:117] "RemoveContainer" containerID="4d7394ab1907a2e6f4f6c5c6034fb06a9f272f2cda72fb6279c59a67473d5cdd"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.434608 4925 generic.go:334] "Generic (PLEG): container finished" podID="c382040c-46ae-4608-8bd0-8d95fcd31ee1" containerID="1457d1c02449f1eecdfa9afa3541f9fe55c02e3bd06e080f2b8d421a73dabaf6" exitCode=0
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.435389 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4" event={"ID":"c382040c-46ae-4608-8bd0-8d95fcd31ee1","Type":"ContainerDied","Data":"1457d1c02449f1eecdfa9afa3541f9fe55c02e3bd06e080f2b8d421a73dabaf6"}
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.477254 4925 scope.go:117] "RemoveContainer" containerID="17bd771cf66aa9d456160042d57a91c6441aadbab8a31570033823e240685ce6"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.496258 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.528086 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.749734 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qzsrc"]
Jan 21 11:22:36 crc kubenswrapper[4925]: E0121 11:22:36.750558 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-kuttl-api-log"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.750633 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-kuttl-api-log"
Jan 21 11:22:36 crc kubenswrapper[4925]: E0121 11:22:36.750709 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-api"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.750759 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-api"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.751044 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-api"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.751121 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-kuttl-api-log"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.752382 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qzsrc"
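Once the api pod hits "SyncLoop REMOVE", the cpu_manager and memory_manager prune the per-container resource assignments recorded for its UID; the E-level "RemoveStaleState: removing container" lines are expected during teardown. The bookkeeping amounts to pruning a podUID-to-container map against the set of live pods, roughly (a toy sketch, not kubelet code):

```go
package main

import "fmt"

// cpuState is a toy stand-in for the kubelet's in-memory CPU-assignment
// state: podUID -> container name -> assigned cpuset.
type cpuState struct {
	assignments map[string]map[string]string
}

// removeStale drops every assignment whose pod is no longer live,
// mirroring the RemoveStaleState / "Deleted CPUSet assignment" lines above.
func (s *cpuState) removeStale(livePods map[string]bool) {
	for uid, containers := range s.assignments {
		if livePods[uid] {
			continue
		}
		for name := range containers {
			fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n", uid, name)
		}
		delete(s.assignments, uid)
	}
}

func main() {
	s := &cpuState{assignments: map[string]map[string]string{
		"b214982c-972d-4f95-a1fe-0a6e598b2889": {"watcher-api": "0-3", "watcher-kuttl-api-log": "0-3"},
	}}
	s.removeStale(map[string]bool{}) // the pod was just REMOVEd -> prune it
}
```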
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.771372 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qzsrc"]
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.809535 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-utilities\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.809787 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7z6h\" (UniqueName: \"kubernetes.io/projected/88c153ed-9a0f-46fd-a664-bc9a4c94a091-kube-api-access-b7z6h\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.810028 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-catalog-content\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.818032 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.911458 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-catalog-content\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.911556 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-utilities\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.911618 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7z6h\" (UniqueName: \"kubernetes.io/projected/88c153ed-9a0f-46fd-a664-bc9a4c94a091-kube-api-access-b7z6h\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.912101 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-catalog-content\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.912205 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-utilities\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:36 crc kubenswrapper[4925]: I0121 11:22:36.941025 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7z6h\" (UniqueName: \"kubernetes.io/projected/88c153ed-9a0f-46fd-a664-bc9a4c94a091-kube-api-access-b7z6h\") pod \"community-operators-qzsrc\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.012991 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p6bm\" (UniqueName: \"kubernetes.io/projected/222d27db-f02b-4c4c-a036-b260d90cfee9-kube-api-access-8p6bm\") pod \"222d27db-f02b-4c4c-a036-b260d90cfee9\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") "
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.013360 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/222d27db-f02b-4c4c-a036-b260d90cfee9-logs\") pod \"222d27db-f02b-4c4c-a036-b260d90cfee9\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") "
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.013387 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-combined-ca-bundle\") pod \"222d27db-f02b-4c4c-a036-b260d90cfee9\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") "
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.013562 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-config-data\") pod \"222d27db-f02b-4c4c-a036-b260d90cfee9\" (UID: \"222d27db-f02b-4c4c-a036-b260d90cfee9\") "
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.013703 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/222d27db-f02b-4c4c-a036-b260d90cfee9-logs" (OuterVolumeSpecName: "logs") pod "222d27db-f02b-4c4c-a036-b260d90cfee9" (UID: "222d27db-f02b-4c4c-a036-b260d90cfee9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.014290 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/222d27db-f02b-4c4c-a036-b260d90cfee9-logs\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.018465 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/222d27db-f02b-4c4c-a036-b260d90cfee9-kube-api-access-8p6bm" (OuterVolumeSpecName: "kube-api-access-8p6bm") pod "222d27db-f02b-4c4c-a036-b260d90cfee9" (UID: "222d27db-f02b-4c4c-a036-b260d90cfee9"). InnerVolumeSpecName "kube-api-access-8p6bm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.040571 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "222d27db-f02b-4c4c-a036-b260d90cfee9" (UID: "222d27db-f02b-4c4c-a036-b260d90cfee9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.079352 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-config-data" (OuterVolumeSpecName: "config-data") pod "222d27db-f02b-4c4c-a036-b260d90cfee9" (UID: "222d27db-f02b-4c4c-a036-b260d90cfee9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.099856 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.118013 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.118074 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p6bm\" (UniqueName: \"kubernetes.io/projected/222d27db-f02b-4c4c-a036-b260d90cfee9-kube-api-access-8p6bm\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.118088 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222d27db-f02b-4c4c-a036-b260d90cfee9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.453663 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.462805 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"222d27db-f02b-4c4c-a036-b260d90cfee9","Type":"ContainerDied","Data":"415d4a9664a2bffe6c217b252d7539a791a656e03aa399efe1403be72e061f5c"}
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.462889 4925 scope.go:117] "RemoveContainer" containerID="c290b96fb22f5f5373f7772da149e593861256dda345ab87f5541e82ab6c91a7"
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.475057 4925 generic.go:334] "Generic (PLEG): container finished" podID="9b8cf236-9a90-4cd3-94c6-8e8212205272" containerID="3ab653f115f86e28435d88950751d0aebbeef3030dadaf407aac199a46f95ab6" exitCode=0
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.475435 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"9b8cf236-9a90-4cd3-94c6-8e8212205272","Type":"ContainerDied","Data":"3ab653f115f86e28435d88950751d0aebbeef3030dadaf407aac199a46f95ab6"}
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.535091 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" path="/var/lib/kubelet/pods/b214982c-972d-4f95-a1fe-0a6e598b2889/volumes"
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.535915 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.535945 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.741863 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qzsrc"]
Jan 21 11:22:37 crc kubenswrapper[4925]: W0121 11:22:37.760005 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88c153ed_9a0f_46fd_a664_bc9a4c94a091.slice/crio-f60fd05ab5859792c7acd996d0311154df014e7b100c392e8ea4d8b3f0e4b923 WatchSource:0}: Error finding container f60fd05ab5859792c7acd996d0311154df014e7b100c392e8ea4d8b3f0e4b923: Status 404 returned error can't find the container with id f60fd05ab5859792c7acd996d0311154df014e7b100c392e8ea4d8b3f0e4b923
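"Cleaned up orphaned pod volumes dir" fires once every volume under a deleted pod's kubelet directory has been unmounted, and the cadvisor 404 just above is the same teardown-vs-startup race on the cgroup side. The directory being removed is derived from the pod UID; a trivial sketch (default kubelet root assumed):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// podVolumesDir reconstructs the path the cleanup lines above report:
// <kubelet root>/pods/<podUID>/volumes.
func podVolumesDir(root, podUID string) string {
	return filepath.Join(root, "pods", podUID, "volumes")
}

func main() {
	fmt.Println(podVolumesDir("/var/lib/kubelet", "b214982c-972d-4f95-a1fe-0a6e598b2889"))
}
```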
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.821381 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.831627 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-config-data\") pod \"9b8cf236-9a90-4cd3-94c6-8e8212205272\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") "
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.831693 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scm69\" (UniqueName: \"kubernetes.io/projected/9b8cf236-9a90-4cd3-94c6-8e8212205272-kube-api-access-scm69\") pod \"9b8cf236-9a90-4cd3-94c6-8e8212205272\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") "
Jan 21 11:22:37 crc kubenswrapper[4925]: I0121 11:22:37.840984 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b8cf236-9a90-4cd3-94c6-8e8212205272-kube-api-access-scm69" (OuterVolumeSpecName: "kube-api-access-scm69") pod "9b8cf236-9a90-4cd3-94c6-8e8212205272" (UID: "9b8cf236-9a90-4cd3-94c6-8e8212205272"). InnerVolumeSpecName "kube-api-access-scm69". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.016187 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-combined-ca-bundle\") pod \"9b8cf236-9a90-4cd3-94c6-8e8212205272\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") "
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.016555 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b8cf236-9a90-4cd3-94c6-8e8212205272-logs\") pod \"9b8cf236-9a90-4cd3-94c6-8e8212205272\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") "
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.016603 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-custom-prometheus-ca\") pod \"9b8cf236-9a90-4cd3-94c6-8e8212205272\" (UID: \"9b8cf236-9a90-4cd3-94c6-8e8212205272\") "
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.017287 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scm69\" (UniqueName: \"kubernetes.io/projected/9b8cf236-9a90-4cd3-94c6-8e8212205272-kube-api-access-scm69\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.021530 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b8cf236-9a90-4cd3-94c6-8e8212205272-logs" (OuterVolumeSpecName: "logs") pod "9b8cf236-9a90-4cd3-94c6-8e8212205272" (UID: "9b8cf236-9a90-4cd3-94c6-8e8212205272"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.093806 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b8cf236-9a90-4cd3-94c6-8e8212205272" (UID: "9b8cf236-9a90-4cd3-94c6-8e8212205272"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.096080 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "9b8cf236-9a90-4cd3-94c6-8e8212205272" (UID: "9b8cf236-9a90-4cd3-94c6-8e8212205272"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.119098 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-config-data" (OuterVolumeSpecName: "config-data") pod "9b8cf236-9a90-4cd3-94c6-8e8212205272" (UID: "9b8cf236-9a90-4cd3-94c6-8e8212205272"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.122748 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.122801 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b8cf236-9a90-4cd3-94c6-8e8212205272-logs\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.122815 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.122826 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8cf236-9a90-4cd3-94c6-8e8212205272-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.245960 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4"
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.427108 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c382040c-46ae-4608-8bd0-8d95fcd31ee1-operator-scripts\") pod \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\" (UID: \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\") "
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.427365 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b92vs\" (UniqueName: \"kubernetes.io/projected/c382040c-46ae-4608-8bd0-8d95fcd31ee1-kube-api-access-b92vs\") pod \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\" (UID: \"c382040c-46ae-4608-8bd0-8d95fcd31ee1\") "
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.428178 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c382040c-46ae-4608-8bd0-8d95fcd31ee1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c382040c-46ae-4608-8bd0-8d95fcd31ee1" (UID: "c382040c-46ae-4608-8bd0-8d95fcd31ee1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.430841 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c382040c-46ae-4608-8bd0-8d95fcd31ee1-kube-api-access-b92vs" (OuterVolumeSpecName: "kube-api-access-b92vs") pod "c382040c-46ae-4608-8bd0-8d95fcd31ee1" (UID: "c382040c-46ae-4608-8bd0-8d95fcd31ee1"). InnerVolumeSpecName "kube-api-access-b92vs". PluginName "kubernetes.io/projected", VolumeGidValue ""
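The reconciler logs each volume teardown in three steps: "UnmountVolume started", "UnmountVolume.TearDown succeeded", then "Volume detached ... DevicePath \"\"". When auditing a capture like this one it is handy to diff the started set against the detached set; anything left pending never finished unmounting (line shapes assumed from this log):

```go
package main

import (
	"fmt"
	"regexp"
)

// reStarted/reDetached match the escaped volume name (\"...\") in the
// reconciler lines above.
var (
	reStarted  = regexp.MustCompile(`UnmountVolume started for volume \\"(.+?)\\"`)
	reDetached = regexp.MustCompile(`Volume detached for volume \\"(.+?)\\"`)
)

func main() {
	lines := []string{
		`I0121 11:22:38.016555 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" ..."`,
		`I0121 11:22:38.122801 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" ..."`,
	}
	pending := map[string]bool{}
	for _, l := range lines {
		if m := reStarted.FindStringSubmatch(l); m != nil {
			pending[m[1]] = true
		}
		if m := reDetached.FindStringSubmatch(l); m != nil {
			delete(pending, m[1])
		}
	}
	fmt.Println("volumes still pending unmount:", pending) // map[] -> clean teardown
}
```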
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.487617 4925 generic.go:334] "Generic (PLEG): container finished" podID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerID="e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16" exitCode=0 Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.487713 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qzsrc" event={"ID":"88c153ed-9a0f-46fd-a664-bc9a4c94a091","Type":"ContainerDied","Data":"e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16"} Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.488019 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qzsrc" event={"ID":"88c153ed-9a0f-46fd-a664-bc9a4c94a091","Type":"ContainerStarted","Data":"f60fd05ab5859792c7acd996d0311154df014e7b100c392e8ea4d8b3f0e4b923"} Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.490965 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"9b8cf236-9a90-4cd3-94c6-8e8212205272","Type":"ContainerDied","Data":"23e44d8c24bf3f1a6a87f6934c7b959a4e371b1eee82f52e03392ae71c96d66d"} Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.491015 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.491111 4925 scope.go:117] "RemoveContainer" containerID="3ab653f115f86e28435d88950751d0aebbeef3030dadaf407aac199a46f95ab6" Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.492696 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4" Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.492702 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher7bc3-account-delete-n45h4" event={"ID":"c382040c-46ae-4608-8bd0-8d95fcd31ee1","Type":"ContainerDied","Data":"6ed7159ce6cb22c57d4b99a06fba9688ce7c2a9ffe7957d47a543652c850dd2a"} Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.493590 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ed7159ce6cb22c57d4b99a06fba9688ce7c2a9ffe7957d47a543652c850dd2a" Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.529482 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c382040c-46ae-4608-8bd0-8d95fcd31ee1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.529819 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b92vs\" (UniqueName: \"kubernetes.io/projected/c382040c-46ae-4608-8bd0-8d95fcd31ee1-kube-api-access-b92vs\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.543280 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:22:38 crc kubenswrapper[4925]: I0121 11:22:38.550871 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:22:39 crc kubenswrapper[4925]: I0121 11:22:39.515708 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="222d27db-f02b-4c4c-a036-b260d90cfee9" path="/var/lib/kubelet/pods/222d27db-f02b-4c4c-a036-b260d90cfee9/volumes" Jan 21 11:22:39 crc kubenswrapper[4925]: I0121 11:22:39.517156 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b8cf236-9a90-4cd3-94c6-8e8212205272" path="/var/lib/kubelet/pods/9b8cf236-9a90-4cd3-94c6-8e8212205272/volumes" Jan 21 11:22:39 crc kubenswrapper[4925]: I0121 11:22:39.517826 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qzsrc" event={"ID":"88c153ed-9a0f-46fd-a664-bc9a4c94a091","Type":"ContainerStarted","Data":"e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a"} Jan 21 11:22:40 crc kubenswrapper[4925]: I0121 11:22:40.518596 4925 generic.go:334] "Generic (PLEG): container finished" podID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerID="e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a" exitCode=0 Jan 21 11:22:40 crc kubenswrapper[4925]: I0121 11:22:40.518642 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qzsrc" event={"ID":"88c153ed-9a0f-46fd-a664-bc9a4c94a091","Type":"ContainerDied","Data":"e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a"} Jan 21 11:22:40 crc kubenswrapper[4925]: I0121 11:22:40.807867 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.131:9322/\": dial tcp 10.217.0.131:9322: i/o timeout (Client.Timeout exceeded while awaiting headers)" Jan 21 11:22:40 crc kubenswrapper[4925]: I0121 11:22:40.808309 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" 
podUID="b214982c-972d-4f95-a1fe-0a6e598b2889" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.131:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 11:22:41 crc kubenswrapper[4925]: I0121 11:22:41.570025 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qzsrc" podStartSLOduration=2.720023315 podStartE2EDuration="5.56999903s" podCreationTimestamp="2026-01-21 11:22:36 +0000 UTC" firstStartedPulling="2026-01-21 11:22:38.490646186 +0000 UTC m=+1650.094538120" lastFinishedPulling="2026-01-21 11:22:41.340621901 +0000 UTC m=+1652.944513835" observedRunningTime="2026-01-21 11:22:41.564178975 +0000 UTC m=+1653.168070919" watchObservedRunningTime="2026-01-21 11:22:41.56999903 +0000 UTC m=+1653.173890964" Jan 21 11:22:42 crc kubenswrapper[4925]: I0121 11:22:42.559048 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qzsrc" event={"ID":"88c153ed-9a0f-46fd-a664-bc9a4c94a091","Type":"ContainerStarted","Data":"f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f"} Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.039087 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-mpckb"] Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.063701 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-mpckb"] Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.074641 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m"] Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.100245 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-7bc3-account-create-update-6qv8m"] Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.129247 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher7bc3-account-delete-n45h4"] Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.158772 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher7bc3-account-delete-n45h4"] Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.583064 4925 util.go:48] "No ready sandbox for pod can be found. 
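The tracker line for community-operators-qzsrc shows how its two duration fields relate: podStartE2EDuration (5.57 s, creation to observed running) minus the image-pull window (firstStartedPulling to lastFinishedPulling, ~2.85 s) gives podStartSLOduration (2.72 s); for the watcher pods earlier the pull timestamps are the zero time, so the two figures coincide. The timestamps use Go's time.String() layout plus a monotonic-clock suffix, which has to be stripped before parsing:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// layout is the reference form of Go's time.String() output, which is
// what these log fields contain once the "m=+..." suffix is removed.
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func parseLogTime(ts string) (time.Time, error) {
	if i := strings.Index(ts, " m=+"); i >= 0 {
		ts = ts[:i]
	}
	return time.Parse(layout, ts)
}

func main() {
	first, _ := parseLogTime("2026-01-21 11:22:38.490646186 +0000 UTC m=+1650.094538120")
	last, _ := parseLogTime("2026-01-21 11:22:41.340621901 +0000 UTC m=+1652.944513835")
	// ~2.85s of pulling; subtracting it from the 5.57s E2E yields the 2.72s SLO figure.
	fmt.Println("image pull window:", last.Sub(first))
}
```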
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.584077 4925 generic.go:334] "Generic (PLEG): container finished" podID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerID="fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1" exitCode=0
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.584133 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerDied","Data":"fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1"}
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.584175 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb","Type":"ContainerDied","Data":"a5acd6933d088043cbd0d34ddbc8d308014836b53de481d6df80e065abe15ddf"}
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.584194 4925 scope.go:117] "RemoveContainer" containerID="90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.617513 4925 scope.go:117] "RemoveContainer" containerID="78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.644708 4925 scope.go:117] "RemoveContainer" containerID="fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.693145 4925 scope.go:117] "RemoveContainer" containerID="3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.717607 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftd4g\" (UniqueName: \"kubernetes.io/projected/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-kube-api-access-ftd4g\") pod \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") "
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.717719 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-scripts\") pod \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") "
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.717772 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-sg-core-conf-yaml\") pod \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") "
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.717812 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-config-data\") pod \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") "
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.717841 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-log-httpd\") pod \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") "
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.717905 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-run-httpd\") pod \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") "
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.717955 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-combined-ca-bundle\") pod \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") "
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.718036 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-ceilometer-tls-certs\") pod \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\" (UID: \"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb\") "
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.718961 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" (UID: "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.722711 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.723774 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" (UID: "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.724568 4925 scope.go:117] "RemoveContainer" containerID="90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c"
Jan 21 11:22:44 crc kubenswrapper[4925]: E0121 11:22:44.738845 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c\": container with ID starting with 90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c not found: ID does not exist" containerID="90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.738916 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c"} err="failed to get container status \"90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c\": rpc error: code = NotFound desc = could not find container \"90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c\": container with ID starting with 90b0da35dd3a3e9e57ebc85d4306db7a58d4a4d9052e00bcc20d7e7401c50a2c not found: ID does not exist"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.738955 4925 scope.go:117] "RemoveContainer" containerID="78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46"
Jan 21 11:22:44 crc kubenswrapper[4925]: E0121 11:22:44.740219 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46\": container with ID starting with 78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46 not found: ID does not exist" containerID="78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.740287 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46"} err="failed to get container status \"78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46\": rpc error: code = NotFound desc = could not find container \"78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46\": container with ID starting with 78c1803903ed6b86230364d244e55ca1f1f91fb5ac0d90c3a570c8065029bb46 not found: ID does not exist"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.740340 4925 scope.go:117] "RemoveContainer" containerID="fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1"
Jan 21 11:22:44 crc kubenswrapper[4925]: E0121 11:22:44.740737 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1\": container with ID starting with fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1 not found: ID does not exist" containerID="fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.740768 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1"} err="failed to get container status \"fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1\": rpc error: code = NotFound desc = could not find container \"fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1\": container with ID starting with fe12f2bf48ce9e7971f46fbfb1a67946e915be215fee522abd986f60fa51e6a1 not found: ID does not exist"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.740790 4925 scope.go:117] "RemoveContainer" containerID="3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07"
Jan 21 11:22:44 crc kubenswrapper[4925]: E0121 11:22:44.741057 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07\": container with ID starting with 3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07 not found: ID does not exist" containerID="3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.741084 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07"} err="failed to get container status \"3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07\": rpc error: code = NotFound desc = could not find container \"3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07\": container with ID starting with 3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07 not found: ID does not exist"
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.742963 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-kube-api-access-ftd4g" (OuterVolumeSpecName: "kube-api-access-ftd4g") pod "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" (UID: "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb"). InnerVolumeSpecName "kube-api-access-ftd4g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.747764 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" (UID: "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.756515 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-scripts" (OuterVolumeSpecName: "scripts") pod "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" (UID: "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
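The ContainerStatus/DeleteContainer NotFound errors above are the kubelet re-deleting ceilometer containers that the earlier graceful kill already removed; scope.go logs the error and moves on. Retry-safe cleanup code usually treats NotFound as "already gone" rather than a failure; a minimal sketch (the helper is mine, using the gRPC status codes CRI responses carry):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer wraps a delete call and swallows NotFound, making the
// operation idempotent like the kubelet's repeated RemoveContainer calls.
func removeContainer(id string, remove func(string) error) error {
	if err := remove(id); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}

func main() {
	gone := func(string) error { return status.Error(codes.NotFound, "ID does not exist") }
	err := removeContainer("3662ee384876938000308ee4598d2e8c239834bf6331ade985ff4e5a0afc7a07", gone)
	fmt.Println(err) // <nil>: already-deleted container treated as success
}
```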
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.825419 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.825451 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.825465 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.825477 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftd4g\" (UniqueName: \"kubernetes.io/projected/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-kube-api-access-ftd4g\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.827632 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" (UID: "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.836854 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" (UID: "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.860774 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-config-data" (OuterVolumeSpecName: "config-data") pod "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" (UID: "d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.927048 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.927085 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:44 crc kubenswrapper[4925]: I0121 11:22:44.927095 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.513229 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e7f680c-2b41-48df-b73e-815b8afec52a" path="/var/lib/kubelet/pods/1e7f680c-2b41-48df-b73e-815b8afec52a/volumes" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.514565 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37a2f6c8-08e8-4000-84f6-5d639c2def77" path="/var/lib/kubelet/pods/37a2f6c8-08e8-4000-84f6-5d639c2def77/volumes" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.515300 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c382040c-46ae-4608-8bd0-8d95fcd31ee1" path="/var/lib/kubelet/pods/c382040c-46ae-4608-8bd0-8d95fcd31ee1/volumes" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.630871 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.662508 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.672312 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.693494 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:22:45 crc kubenswrapper[4925]: E0121 11:22:45.693965 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="ceilometer-notification-agent" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.693992 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="ceilometer-notification-agent" Jan 21 11:22:45 crc kubenswrapper[4925]: E0121 11:22:45.694019 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="proxy-httpd" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694026 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="proxy-httpd" Jan 21 11:22:45 crc kubenswrapper[4925]: E0121 11:22:45.694042 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8cf236-9a90-4cd3-94c6-8e8212205272" containerName="watcher-decision-engine" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694049 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8cf236-9a90-4cd3-94c6-8e8212205272" containerName="watcher-decision-engine" Jan 21 11:22:45 crc kubenswrapper[4925]: E0121 
11:22:45.694059 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="222d27db-f02b-4c4c-a036-b260d90cfee9" containerName="watcher-applier" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694065 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="222d27db-f02b-4c4c-a036-b260d90cfee9" containerName="watcher-applier" Jan 21 11:22:45 crc kubenswrapper[4925]: E0121 11:22:45.694076 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="sg-core" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694082 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="sg-core" Jan 21 11:22:45 crc kubenswrapper[4925]: E0121 11:22:45.694090 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="ceilometer-central-agent" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694096 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="ceilometer-central-agent" Jan 21 11:22:45 crc kubenswrapper[4925]: E0121 11:22:45.694114 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c382040c-46ae-4608-8bd0-8d95fcd31ee1" containerName="mariadb-account-delete" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694120 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c382040c-46ae-4608-8bd0-8d95fcd31ee1" containerName="mariadb-account-delete" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694269 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="proxy-httpd" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694288 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="sg-core" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694297 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="ceilometer-notification-agent" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694305 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b8cf236-9a90-4cd3-94c6-8e8212205272" containerName="watcher-decision-engine" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694311 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c382040c-46ae-4608-8bd0-8d95fcd31ee1" containerName="mariadb-account-delete" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694322 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="222d27db-f02b-4c4c-a036-b260d90cfee9" containerName="watcher-applier" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.694331 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" containerName="ceilometer-central-agent" Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.696159 4925 util.go:30] "No sandbox for pod can be found. 
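The RemoveStaleState burst above is the CPU and memory managers dropping per-container pinning entries for pods that no longer exist, before the replacement ceilometer-0 is admitted; each cpu_manager line is paired with a state_mem "Deleted CPUSet assignment". A toy model of that bookkeeping, with hypothetical type and field names (kubelet's real state is checkpointed to disk, not a bare map):

```go
package main

import "fmt"

// staleKey identifies one container's pinned-CPU assignment.
// Illustrative names, not kubelet's actual types.
type staleKey struct {
	podUID        string
	containerName string
}

type cpuState struct {
	assignments map[staleKey]string // container -> CPU set, e.g. "2-3"
}

// removeStaleState mirrors what the log shows: every assignment whose pod
// is no longer active is deleted before new pods are admitted.
func (s *cpuState) removeStaleState(activePods map[string]bool) {
	for k := range s.assignments {
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n",
				k.podUID, k.containerName)
			delete(s.assignments, k)
		}
	}
}

func main() {
	s := &cpuState{assignments: map[staleKey]string{
		{"d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb", "proxy-httpd"}:     "0-1",
		{"222d27db-f02b-4c4c-a036-b260d90cfee9", "watcher-applier"}: "2",
	}}
	// Neither pod is active any more, so both entries are dropped.
	s.removeStaleState(map[string]bool{})
	fmt.Println("remaining assignments:", len(s.assignments))
}
```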
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.701013 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.701589 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.701844 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.717315 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.756912 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.757044 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-scripts\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.757070 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.757138 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-run-httpd\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.757204 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-config-data\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.757248 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m95pn\" (UniqueName: \"kubernetes.io/projected/62f345e0-c206-45f7-91c6-67de05b87130-kube-api-access-m95pn\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.757286 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.757482 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-log-httpd\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.859796 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.859892 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-scripts\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.859930 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.860012 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-run-httpd\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.860063 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-config-data\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.860094 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m95pn\" (UniqueName: \"kubernetes.io/projected/62f345e0-c206-45f7-91c6-67de05b87130-kube-api-access-m95pn\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.860148 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.860203 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-log-httpd\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.860969 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-log-httpd\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.861577 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-run-httpd\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.866041 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.867078 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-config-data\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.867387 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-scripts\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.868705 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.871127 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:45 crc kubenswrapper[4925]: I0121 11:22:45.881095 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m95pn\" (UniqueName: \"kubernetes.io/projected/62f345e0-c206-45f7-91c6-67de05b87130-kube-api-access-m95pn\") pod \"ceilometer-0\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:46 crc kubenswrapper[4925]: I0121 11:22:46.019406 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:22:46 crc kubenswrapper[4925]: I0121 11:22:46.583905 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:22:46 crc kubenswrapper[4925]: I0121 11:22:46.646477 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerStarted","Data":"65ce3f47db76c730e7dd7b2f61f9b54e4e81dec23628ed236b681dc00b1ad73d"}
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.186127 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.187267 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.265238 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.514622 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb" path="/var/lib/kubelet/pods/d4ac00a7-4c33-466c-b9ac-fff1dc1cfcfb/volumes"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.649192 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-h95vt"]
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.650686 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-h95vt"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.702686 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-h95vt"]
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.789528 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerStarted","Data":"cf8cb39acfd7037250c81ab49b6d4663ebb5189e957fe12406f3418be5f2009a"}
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.839976 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-operator-scripts\") pod \"watcher-db-create-h95vt\" (UID: \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\") " pod="watcher-kuttl-default/watcher-db-create-h95vt"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.840163 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5w9l\" (UniqueName: \"kubernetes.io/projected/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-kube-api-access-r5w9l\") pod \"watcher-db-create-h95vt\" (UID: \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\") " pod="watcher-kuttl-default/watcher-db-create-h95vt"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.854814 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-48bb-account-create-update-g87td"]
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.856292 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.859492 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.863184 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-48bb-account-create-update-g87td"]
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.899204 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qzsrc"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.945034 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9znzt\" (UniqueName: \"kubernetes.io/projected/075d7c44-b7c5-4883-8e7a-b8d2036edf88-kube-api-access-9znzt\") pod \"watcher-48bb-account-create-update-g87td\" (UID: \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\") " pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.945151 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075d7c44-b7c5-4883-8e7a-b8d2036edf88-operator-scripts\") pod \"watcher-48bb-account-create-update-g87td\" (UID: \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\") " pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.945244 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-operator-scripts\") pod \"watcher-db-create-h95vt\" (UID: \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\") " pod="watcher-kuttl-default/watcher-db-create-h95vt"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.945283 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5w9l\" (UniqueName: \"kubernetes.io/projected/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-kube-api-access-r5w9l\") pod \"watcher-db-create-h95vt\" (UID: \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\") " pod="watcher-kuttl-default/watcher-db-create-h95vt"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.946791 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-operator-scripts\") pod \"watcher-db-create-h95vt\" (UID: \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\") " pod="watcher-kuttl-default/watcher-db-create-h95vt"
Jan 21 11:22:47 crc kubenswrapper[4925]: I0121 11:22:47.978949 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5w9l\" (UniqueName: \"kubernetes.io/projected/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-kube-api-access-r5w9l\") pod \"watcher-db-create-h95vt\" (UID: \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\") " pod="watcher-kuttl-default/watcher-db-create-h95vt"
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.046720 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9znzt\" (UniqueName: \"kubernetes.io/projected/075d7c44-b7c5-4883-8e7a-b8d2036edf88-kube-api-access-9znzt\") pod \"watcher-48bb-account-create-update-g87td\" (UID: \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\") " pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td"
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.046852 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075d7c44-b7c5-4883-8e7a-b8d2036edf88-operator-scripts\") pod \"watcher-48bb-account-create-update-g87td\" (UID: \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\") " pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td"
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.047802 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075d7c44-b7c5-4883-8e7a-b8d2036edf88-operator-scripts\") pod \"watcher-48bb-account-create-update-g87td\" (UID: \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\") " pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td"
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.066826 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9znzt\" (UniqueName: \"kubernetes.io/projected/075d7c44-b7c5-4883-8e7a-b8d2036edf88-kube-api-access-9znzt\") pod \"watcher-48bb-account-create-update-g87td\" (UID: \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\") " pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td"
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.189307 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td"
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.275512 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-h95vt"
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.596741 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-48bb-account-create-update-g87td"]
Jan 21 11:22:48 crc kubenswrapper[4925]: W0121 11:22:48.627262 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod075d7c44_b7c5_4883_8e7a_b8d2036edf88.slice/crio-e5ecb5786c25b867dc667d309ca05c2503a5d4ed9dad2718820bf6d61cc7e6db WatchSource:0}: Error finding container e5ecb5786c25b867dc667d309ca05c2503a5d4ed9dad2718820bf6d61cc7e6db: Status 404 returned error can't find the container with id e5ecb5786c25b867dc667d309ca05c2503a5d4ed9dad2718820bf6d61cc7e6db
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.795227 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td" event={"ID":"075d7c44-b7c5-4883-8e7a-b8d2036edf88","Type":"ContainerStarted","Data":"e5ecb5786c25b867dc667d309ca05c2503a5d4ed9dad2718820bf6d61cc7e6db"}
Jan 21 11:22:48 crc kubenswrapper[4925]: I0121 11:22:48.803581 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerStarted","Data":"d2b4711af968ebceea401498b9421f51400c45250ef419f87681e94313cad62c"}
Jan 21 11:22:49 crc kubenswrapper[4925]: I0121 11:22:49.045443 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-h95vt"]
Jan 21 11:22:49 crc kubenswrapper[4925]: W0121 11:22:49.050245 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9162a7aa_c49a_4d3c_90cc_6a504448ffe2.slice/crio-a2223967c553ace7fa711ccd6a2c0249558ea20a9cd19da5b128910cf26ae4ea WatchSource:0}: Error finding container a2223967c553ace7fa711ccd6a2c0249558ea20a9cd19da5b128910cf26ae4ea: Status 404 returned error can't find the container with id a2223967c553ace7fa711ccd6a2c0249558ea20a9cd19da5b128910cf26ae4ea
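The two "Failed to process watch event" warnings above are cAdvisor racing container creation: the crio-<id> cgroup shows up and is inspected before CRI-O has registered the container, hence the 404. The pod UID is still recoverable from the kubepods slice name, where systemd flattens dashes to underscores. A stdlib sketch of that extraction (a hypothetical helper, not cAdvisor code):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// sliceRe matches the pod portion of a kubepods cgroup path, e.g.
// kubepods-besteffort-pod9162a7aa_c49a_4d3c_90cc_6a504448ffe2.slice
// (32 hex digits plus 4 underscores = 36 characters).
var sliceRe = regexp.MustCompile(`pod([0-9a-f_]{36})\.slice`)

// podUIDFromCgroup recovers the Kubernetes pod UID from a cgroup path by
// undoing the dash-to-underscore escaping in the slice name.
func podUIDFromCgroup(path string) (string, bool) {
	m := sliceRe.FindStringSubmatch(path)
	if m == nil {
		return "", false
	}
	return strings.ReplaceAll(m[1], "_", "-"), true
}

func main() {
	p := "/kubepods.slice/kubepods-besteffort.slice/" +
		"kubepods-besteffort-pod9162a7aa_c49a_4d3c_90cc_6a504448ffe2.slice/" +
		"crio-a2223967c553ace7fa711ccd6a2c0249558ea20a9cd19da5b128910cf26ae4ea"
	if uid, ok := podUIDFromCgroup(p); ok {
		fmt.Println(uid) // 9162a7aa-c49a-4d3c-90cc-6a504448ffe2
	}
}
```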
containerID="cri-o://f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f" gracePeriod=2 Jan 21 11:22:51 crc kubenswrapper[4925]: E0121 11:22:51.177719 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88c153ed_9a0f_46fd_a664_bc9a4c94a091.slice/crio-conmon-f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88c153ed_9a0f_46fd_a664_bc9a4c94a091.slice/crio-f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f.scope\": RecentStats: unable to find data in memory cache]" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.396684 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.506383 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9znzt\" (UniqueName: \"kubernetes.io/projected/075d7c44-b7c5-4883-8e7a-b8d2036edf88-kube-api-access-9znzt\") pod \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\" (UID: \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\") " Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.506574 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075d7c44-b7c5-4883-8e7a-b8d2036edf88-operator-scripts\") pod \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\" (UID: \"075d7c44-b7c5-4883-8e7a-b8d2036edf88\") " Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.508073 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/075d7c44-b7c5-4883-8e7a-b8d2036edf88-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "075d7c44-b7c5-4883-8e7a-b8d2036edf88" (UID: "075d7c44-b7c5-4883-8e7a-b8d2036edf88"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.523798 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/075d7c44-b7c5-4883-8e7a-b8d2036edf88-kube-api-access-9znzt" (OuterVolumeSpecName: "kube-api-access-9znzt") pod "075d7c44-b7c5-4883-8e7a-b8d2036edf88" (UID: "075d7c44-b7c5-4883-8e7a-b8d2036edf88"). InnerVolumeSpecName "kube-api-access-9znzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.539513 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qzsrc" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.608885 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7z6h\" (UniqueName: \"kubernetes.io/projected/88c153ed-9a0f-46fd-a664-bc9a4c94a091-kube-api-access-b7z6h\") pod \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.608955 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-utilities\") pod \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.609008 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-catalog-content\") pod \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\" (UID: \"88c153ed-9a0f-46fd-a664-bc9a4c94a091\") " Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.609286 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075d7c44-b7c5-4883-8e7a-b8d2036edf88-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.609317 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9znzt\" (UniqueName: \"kubernetes.io/projected/075d7c44-b7c5-4883-8e7a-b8d2036edf88-kube-api-access-9znzt\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.609983 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-utilities" (OuterVolumeSpecName: "utilities") pod "88c153ed-9a0f-46fd-a664-bc9a4c94a091" (UID: "88c153ed-9a0f-46fd-a664-bc9a4c94a091"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.725006 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.747414 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88c153ed-9a0f-46fd-a664-bc9a4c94a091-kube-api-access-b7z6h" (OuterVolumeSpecName: "kube-api-access-b7z6h") pod "88c153ed-9a0f-46fd-a664-bc9a4c94a091" (UID: "88c153ed-9a0f-46fd-a664-bc9a4c94a091"). InnerVolumeSpecName "kube-api-access-b7z6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.778039 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88c153ed-9a0f-46fd-a664-bc9a4c94a091" (UID: "88c153ed-9a0f-46fd-a664-bc9a4c94a091"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.826368 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88c153ed-9a0f-46fd-a664-bc9a4c94a091-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.826442 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7z6h\" (UniqueName: \"kubernetes.io/projected/88c153ed-9a0f-46fd-a664-bc9a4c94a091-kube-api-access-b7z6h\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.897710 4925 generic.go:334] "Generic (PLEG): container finished" podID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerID="f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f" exitCode=0 Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.897842 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qzsrc" event={"ID":"88c153ed-9a0f-46fd-a664-bc9a4c94a091","Type":"ContainerDied","Data":"f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f"} Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.897877 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qzsrc" event={"ID":"88c153ed-9a0f-46fd-a664-bc9a4c94a091","Type":"ContainerDied","Data":"f60fd05ab5859792c7acd996d0311154df014e7b100c392e8ea4d8b3f0e4b923"} Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.897920 4925 scope.go:117] "RemoveContainer" containerID="f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.902040 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qzsrc" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.911218 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td" event={"ID":"075d7c44-b7c5-4883-8e7a-b8d2036edf88","Type":"ContainerDied","Data":"e5ecb5786c25b867dc667d309ca05c2503a5d4ed9dad2718820bf6d61cc7e6db"} Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.911261 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5ecb5786c25b867dc667d309ca05c2503a5d4ed9dad2718820bf6d61cc7e6db" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.911439 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-48bb-account-create-update-g87td" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.921701 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerStarted","Data":"fcc323ff03e5bb84a07c86d43322fadefbc533649579291fb92145e8f57f3512"} Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.946240 4925 scope.go:117] "RemoveContainer" containerID="e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a" Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.948195 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qzsrc"] Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.980130 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qzsrc"] Jan 21 11:22:51 crc kubenswrapper[4925]: I0121 11:22:51.982552 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=3.021923455 podStartE2EDuration="6.982535514s" podCreationTimestamp="2026-01-21 11:22:45 +0000 UTC" firstStartedPulling="2026-01-21 11:22:46.60628724 +0000 UTC m=+1658.210179174" lastFinishedPulling="2026-01-21 11:22:50.566899299 +0000 UTC m=+1662.170791233" observedRunningTime="2026-01-21 11:22:51.973441055 +0000 UTC m=+1663.577333009" watchObservedRunningTime="2026-01-21 11:22:51.982535514 +0000 UTC m=+1663.586427448" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.001660 4925 scope.go:117] "RemoveContainer" containerID="e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.040236 4925 scope.go:117] "RemoveContainer" containerID="f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f" Jan 21 11:22:52 crc kubenswrapper[4925]: E0121 11:22:52.040927 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f\": container with ID starting with f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f not found: ID does not exist" containerID="f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.040975 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f"} err="failed to get container status \"f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f\": rpc error: code = NotFound desc = could not find container \"f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f\": container with ID starting with f553c96823c918dc32a5e1731f69a3d7cabd0ade27c778f54e3956732679831f not found: ID does not exist" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.041007 4925 scope.go:117] "RemoveContainer" containerID="e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a" Jan 21 11:22:52 crc kubenswrapper[4925]: E0121 11:22:52.041618 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a\": container with ID starting with e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a not found: ID does not exist" 
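The "Observed pod startup duration" lines carry both wall-clock timestamps and a monotonic-clock offset (the "m=+…" suffix), which time.Parse rejects, so the suffix has to be stripped before the values can be recomputed. A sketch that rederives podStartE2EDuration for ceilometer-0 as watchObservedRunningTime minus podCreationTimestamp, using the values logged above:

```go
package main

import (
	"fmt"
	"log"
	"strings"
	"time"
)

// Layout of Go's default time.Time formatting as it appears in kubelet logs.
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

// parseKubeTime parses a logged timestamp, dropping the monotonic-clock
// suffix ("m=+…") if present, since time.Parse cannot consume it.
func parseKubeTime(s string) (time.Time, error) {
	if i := strings.Index(s, " m=+"); i >= 0 {
		s = s[:i]
	}
	return time.Parse(layout, s)
}

func main() {
	created, err := parseKubeTime("2026-01-21 11:22:45 +0000 UTC")
	if err != nil {
		log.Fatal(err)
	}
	running, err := parseKubeTime("2026-01-21 11:22:51.982535514 +0000 UTC m=+1663.586427448")
	if err != nil {
		log.Fatal(err)
	}
	// Prints 6.982535514s, matching the logged podStartE2EDuration.
	fmt.Println(running.Sub(created))
}
```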
containerID="e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.041688 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a"} err="failed to get container status \"e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a\": rpc error: code = NotFound desc = could not find container \"e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a\": container with ID starting with e01817a4c8a2567e303e4009597a84ee1294917194958ed279d498a67a281c2a not found: ID does not exist" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.041736 4925 scope.go:117] "RemoveContainer" containerID="e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16" Jan 21 11:22:52 crc kubenswrapper[4925]: E0121 11:22:52.042196 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16\": container with ID starting with e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16 not found: ID does not exist" containerID="e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.042250 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16"} err="failed to get container status \"e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16\": rpc error: code = NotFound desc = could not find container \"e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16\": container with ID starting with e4a6d49b95bf728c9b3f49436c099f0032dbfe74e55dacab637636405b502e16 not found: ID does not exist" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.377718 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-h95vt" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.548812 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5w9l\" (UniqueName: \"kubernetes.io/projected/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-kube-api-access-r5w9l\") pod \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\" (UID: \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\") " Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.549118 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-operator-scripts\") pod \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\" (UID: \"9162a7aa-c49a-4d3c-90cc-6a504448ffe2\") " Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.550076 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9162a7aa-c49a-4d3c-90cc-6a504448ffe2" (UID: "9162a7aa-c49a-4d3c-90cc-6a504448ffe2"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.556510 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-kube-api-access-r5w9l" (OuterVolumeSpecName: "kube-api-access-r5w9l") pod "9162a7aa-c49a-4d3c-90cc-6a504448ffe2" (UID: "9162a7aa-c49a-4d3c-90cc-6a504448ffe2"). InnerVolumeSpecName "kube-api-access-r5w9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.651526 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.652361 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5w9l\" (UniqueName: \"kubernetes.io/projected/9162a7aa-c49a-4d3c-90cc-6a504448ffe2-kube-api-access-r5w9l\") on node \"crc\" DevicePath \"\"" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.943835 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-h95vt" event={"ID":"9162a7aa-c49a-4d3c-90cc-6a504448ffe2","Type":"ContainerDied","Data":"a2223967c553ace7fa711ccd6a2c0249558ea20a9cd19da5b128910cf26ae4ea"} Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.943900 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2223967c553ace7fa711ccd6a2c0249558ea20a9cd19da5b128910cf26ae4ea" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.943927 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-h95vt" Jan 21 11:22:52 crc kubenswrapper[4925]: I0121 11:22:52.959252 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:22:53 crc kubenswrapper[4925]: I0121 11:22:53.512561 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" path="/var/lib/kubelet/pods/88c153ed-9a0f-46fd-a664-bc9a4c94a091/volumes" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.043783 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj"] Jan 21 11:22:58 crc kubenswrapper[4925]: E0121 11:22:58.044541 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerName="registry-server" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.044559 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerName="registry-server" Jan 21 11:22:58 crc kubenswrapper[4925]: E0121 11:22:58.044587 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerName="extract-content" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.044593 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerName="extract-content" Jan 21 11:22:58 crc kubenswrapper[4925]: E0121 11:22:58.044609 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075d7c44-b7c5-4883-8e7a-b8d2036edf88" containerName="mariadb-account-create-update" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.044615 4925 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="075d7c44-b7c5-4883-8e7a-b8d2036edf88" containerName="mariadb-account-create-update" Jan 21 11:22:58 crc kubenswrapper[4925]: E0121 11:22:58.044627 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerName="extract-utilities" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.044634 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerName="extract-utilities" Jan 21 11:22:58 crc kubenswrapper[4925]: E0121 11:22:58.044644 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9162a7aa-c49a-4d3c-90cc-6a504448ffe2" containerName="mariadb-database-create" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.044650 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9162a7aa-c49a-4d3c-90cc-6a504448ffe2" containerName="mariadb-database-create" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.044800 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9162a7aa-c49a-4d3c-90cc-6a504448ffe2" containerName="mariadb-database-create" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.044812 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="075d7c44-b7c5-4883-8e7a-b8d2036edf88" containerName="mariadb-account-create-update" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.044826 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="88c153ed-9a0f-46fd-a664-bc9a4c94a091" containerName="registry-server" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.045580 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.049248 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-pj7kw" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.050110 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.053287 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnn4w\" (UniqueName: \"kubernetes.io/projected/da55624c-e330-4a8c-b9a5-b07d40ff06ff-kube-api-access-fnn4w\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.053336 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-config-data\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.053542 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.053631 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-db-sync-config-data\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.073388 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj"] Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.155373 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-db-sync-config-data\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.155543 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnn4w\" (UniqueName: \"kubernetes.io/projected/da55624c-e330-4a8c-b9a5-b07d40ff06ff-kube-api-access-fnn4w\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.155571 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-config-data\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.157014 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.162558 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-db-sync-config-data\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.164469 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.165236 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-config-data\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.181325 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnn4w\" (UniqueName: \"kubernetes.io/projected/da55624c-e330-4a8c-b9a5-b07d40ff06ff-kube-api-access-fnn4w\") pod \"watcher-kuttl-db-sync-ss2lj\" (UID: 
\"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.365606 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:22:58 crc kubenswrapper[4925]: I0121 11:22:58.979334 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj"] Jan 21 11:22:59 crc kubenswrapper[4925]: I0121 11:22:59.140874 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" event={"ID":"da55624c-e330-4a8c-b9a5-b07d40ff06ff","Type":"ContainerStarted","Data":"92ff422c65509abb3fdcaf3ac73c9798adfe2d0fcbef2f9863e4d1272a3a54c5"} Jan 21 11:23:00 crc kubenswrapper[4925]: I0121 11:23:00.152798 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" event={"ID":"da55624c-e330-4a8c-b9a5-b07d40ff06ff","Type":"ContainerStarted","Data":"406322ec23c25f5353fb536582332bf28b383f3958044207ddeeec43e03be3ee"} Jan 21 11:23:00 crc kubenswrapper[4925]: I0121 11:23:00.177152 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" podStartSLOduration=2.177125007 podStartE2EDuration="2.177125007s" podCreationTimestamp="2026-01-21 11:22:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:23:00.176313522 +0000 UTC m=+1671.780205466" watchObservedRunningTime="2026-01-21 11:23:00.177125007 +0000 UTC m=+1671.781016941" Jan 21 11:23:03 crc kubenswrapper[4925]: I0121 11:23:03.184811 4925 generic.go:334] "Generic (PLEG): container finished" podID="da55624c-e330-4a8c-b9a5-b07d40ff06ff" containerID="406322ec23c25f5353fb536582332bf28b383f3958044207ddeeec43e03be3ee" exitCode=0 Jan 21 11:23:03 crc kubenswrapper[4925]: I0121 11:23:03.184919 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" event={"ID":"da55624c-e330-4a8c-b9a5-b07d40ff06ff","Type":"ContainerDied","Data":"406322ec23c25f5353fb536582332bf28b383f3958044207ddeeec43e03be3ee"} Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.089453 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.210006 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" event={"ID":"da55624c-e330-4a8c-b9a5-b07d40ff06ff","Type":"ContainerDied","Data":"92ff422c65509abb3fdcaf3ac73c9798adfe2d0fcbef2f9863e4d1272a3a54c5"} Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.210348 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92ff422c65509abb3fdcaf3ac73c9798adfe2d0fcbef2f9863e4d1272a3a54c5" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.210119 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.267298 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-config-data\") pod \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.271021 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-combined-ca-bundle\") pod \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.271118 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnn4w\" (UniqueName: \"kubernetes.io/projected/da55624c-e330-4a8c-b9a5-b07d40ff06ff-kube-api-access-fnn4w\") pod \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.271410 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-db-sync-config-data\") pod \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\" (UID: \"da55624c-e330-4a8c-b9a5-b07d40ff06ff\") " Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.292602 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "da55624c-e330-4a8c-b9a5-b07d40ff06ff" (UID: "da55624c-e330-4a8c-b9a5-b07d40ff06ff"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.294842 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da55624c-e330-4a8c-b9a5-b07d40ff06ff-kube-api-access-fnn4w" (OuterVolumeSpecName: "kube-api-access-fnn4w") pod "da55624c-e330-4a8c-b9a5-b07d40ff06ff" (UID: "da55624c-e330-4a8c-b9a5-b07d40ff06ff"). InnerVolumeSpecName "kube-api-access-fnn4w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.337201 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-config-data" (OuterVolumeSpecName: "config-data") pod "da55624c-e330-4a8c-b9a5-b07d40ff06ff" (UID: "da55624c-e330-4a8c-b9a5-b07d40ff06ff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.352759 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da55624c-e330-4a8c-b9a5-b07d40ff06ff" (UID: "da55624c-e330-4a8c-b9a5-b07d40ff06ff"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.394461 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.394507 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.394524 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnn4w\" (UniqueName: \"kubernetes.io/projected/da55624c-e330-4a8c-b9a5-b07d40ff06ff-kube-api-access-fnn4w\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.394535 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da55624c-e330-4a8c-b9a5-b07d40ff06ff-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.716774 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:05 crc kubenswrapper[4925]: E0121 11:23:05.717221 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da55624c-e330-4a8c-b9a5-b07d40ff06ff" containerName="watcher-kuttl-db-sync" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.717240 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="da55624c-e330-4a8c-b9a5-b07d40ff06ff" containerName="watcher-kuttl-db-sync" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.717436 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="da55624c-e330-4a8c-b9a5-b07d40ff06ff" containerName="watcher-kuttl-db-sync" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.718413 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.724803 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-pj7kw" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.725213 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.725832 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.729682 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.741571 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.762169 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.763900 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.767172 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.783454 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.843230 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.844910 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.851251 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.881146 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.890100 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-logs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.890158 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.890963 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891002 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891050 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891082 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9vf6\" (UniqueName: \"kubernetes.io/projected/a6221ad8-c88f-4c28-a38d-21182311acf0-kube-api-access-t9vf6\") pod \"watcher-kuttl-decision-engine-0\" (UID: 
\"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891134 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6221ad8-c88f-4c28-a38d-21182311acf0-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891165 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891184 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891214 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkhcb\" (UniqueName: \"kubernetes.io/projected/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-kube-api-access-qkhcb\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891254 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.891281 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.992772 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.992820 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.992850 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-tjfpw\" (UniqueName: \"kubernetes.io/projected/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-kube-api-access-tjfpw\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993185 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6221ad8-c88f-4c28-a38d-21182311acf0-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993268 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993298 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993361 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkhcb\" (UniqueName: \"kubernetes.io/projected/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-kube-api-access-qkhcb\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993422 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993466 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993508 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-logs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993549 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993600 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993660 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993714 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993997 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.994067 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9vf6\" (UniqueName: \"kubernetes.io/projected/a6221ad8-c88f-4c28-a38d-21182311acf0-kube-api-access-t9vf6\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.993716 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6221ad8-c88f-4c28-a38d-21182311acf0-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:05 crc kubenswrapper[4925]: I0121 11:23:05.994365 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-logs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.001156 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.009692 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.013279 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: 
\"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.013321 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.013803 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.014096 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkhcb\" (UniqueName: \"kubernetes.io/projected/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-kube-api-access-qkhcb\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.014277 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.016412 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.020930 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9vf6\" (UniqueName: \"kubernetes.io/projected/a6221ad8-c88f-4c28-a38d-21182311acf0-kube-api-access-t9vf6\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.022360 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.050004 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.095274 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.095335 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.095364 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjfpw\" (UniqueName: \"kubernetes.io/projected/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-kube-api-access-tjfpw\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.095484 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.096952 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.100768 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.101987 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.103815 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.120307 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjfpw\" (UniqueName: \"kubernetes.io/projected/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-kube-api-access-tjfpw\") pod \"watcher-kuttl-applier-0\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.172569 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.771192 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.861872 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:23:06 crc kubenswrapper[4925]: W0121 11:23:06.875443 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6221ad8_c88f_4c28_a38d_21182311acf0.slice/crio-a9e359a415c54a03268421816a25602c0a59dbeb092867ac5083d8830c7164e2 WatchSource:0}: Error finding container a9e359a415c54a03268421816a25602c0a59dbeb092867ac5083d8830c7164e2: Status 404 returned error can't find the container with id a9e359a415c54a03268421816a25602c0a59dbeb092867ac5083d8830c7164e2 Jan 21 11:23:06 crc kubenswrapper[4925]: I0121 11:23:06.968016 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:23:07 crc kubenswrapper[4925]: I0121 11:23:07.256555 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56","Type":"ContainerStarted","Data":"e6da232322ec7060da1c1ccbf46312098c077683670781750f300e61da8f203a"} Jan 21 11:23:07 crc kubenswrapper[4925]: I0121 11:23:07.264809 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"a6221ad8-c88f-4c28-a38d-21182311acf0","Type":"ContainerStarted","Data":"a9e359a415c54a03268421816a25602c0a59dbeb092867ac5083d8830c7164e2"} Jan 21 11:23:07 crc kubenswrapper[4925]: I0121 11:23:07.267975 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"d216b681-7f33-4a4f-b938-f9cb5b01bbd2","Type":"ContainerStarted","Data":"848caac6e347b26a9030f8c75b507cb9b90aa166a7d9b016127eeb38e57d236e"} Jan 21 11:23:08 crc kubenswrapper[4925]: I0121 11:23:08.279456 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56","Type":"ContainerStarted","Data":"d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60"} Jan 21 11:23:08 crc kubenswrapper[4925]: I0121 11:23:08.282216 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"a6221ad8-c88f-4c28-a38d-21182311acf0","Type":"ContainerStarted","Data":"1d17c6c47bb7c2fcb14c3724231352074d0748b243de7a41a6f4d20f5cc226fa"} Jan 21 11:23:08 crc kubenswrapper[4925]: I0121 11:23:08.284201 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"d216b681-7f33-4a4f-b938-f9cb5b01bbd2","Type":"ContainerStarted","Data":"045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352"} Jan 21 11:23:08 crc kubenswrapper[4925]: I0121 11:23:08.284237 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"d216b681-7f33-4a4f-b938-f9cb5b01bbd2","Type":"ContainerStarted","Data":"412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f"} Jan 21 11:23:08 crc kubenswrapper[4925]: I0121 11:23:08.285558 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:08 crc kubenswrapper[4925]: I0121 11:23:08.309490 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=3.309467772 podStartE2EDuration="3.309467772s" podCreationTimestamp="2026-01-21 11:23:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:23:08.30656901 +0000 UTC m=+1679.910460954" watchObservedRunningTime="2026-01-21 11:23:08.309467772 +0000 UTC m=+1679.913359706" Jan 21 11:23:08 crc kubenswrapper[4925]: I0121 11:23:08.339260 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=3.3392298289999998 podStartE2EDuration="3.339229829s" podCreationTimestamp="2026-01-21 11:23:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:23:08.333667682 +0000 UTC m=+1679.937559626" watchObservedRunningTime="2026-01-21 11:23:08.339229829 +0000 UTC m=+1679.943121763" Jan 21 11:23:08 crc kubenswrapper[4925]: I0121 11:23:08.365645 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=3.365603746 podStartE2EDuration="3.365603746s" podCreationTimestamp="2026-01-21 11:23:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:23:08.360838285 +0000 UTC m=+1679.964730239" watchObservedRunningTime="2026-01-21 11:23:08.365603746 +0000 UTC m=+1679.969495680" Jan 21 11:23:10 crc kubenswrapper[4925]: I0121 11:23:10.302109 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:23:11 crc kubenswrapper[4925]: I0121 11:23:11.050938 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:11 crc kubenswrapper[4925]: I0121 11:23:11.172834 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:11 crc kubenswrapper[4925]: I0121 11:23:11.317443 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:23:12 crc kubenswrapper[4925]: I0121 11:23:12.139215 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.030287 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.050843 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.071387 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.102290 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.152796 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.177112 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.206027 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.364534 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.372085 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.518058 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:16 crc kubenswrapper[4925]: I0121 11:23:16.522718 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:19 crc kubenswrapper[4925]: I0121 11:23:19.941526 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:23:19 crc kubenswrapper[4925]: I0121 11:23:19.941899 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:23:20 crc kubenswrapper[4925]: I0121 11:23:20.570168 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:23:20 crc kubenswrapper[4925]: I0121 11:23:20.570595 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="ceilometer-central-agent" containerID="cri-o://cf8cb39acfd7037250c81ab49b6d4663ebb5189e957fe12406f3418be5f2009a" gracePeriod=30 Jan 21 11:23:20 crc kubenswrapper[4925]: I0121 11:23:20.570799 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="sg-core" containerID="cri-o://231ba32dd6cd6e434fabeb00c23353885ee0543925258cfa868d025e7c68fc36" gracePeriod=30 Jan 21 11:23:20 crc kubenswrapper[4925]: I0121 11:23:20.570838 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="proxy-httpd" containerID="cri-o://fcc323ff03e5bb84a07c86d43322fadefbc533649579291fb92145e8f57f3512" gracePeriod=30 Jan 21 11:23:20 crc kubenswrapper[4925]: I0121 11:23:20.570828 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="ceilometer-notification-agent" containerID="cri-o://d2b4711af968ebceea401498b9421f51400c45250ef419f87681e94313cad62c" 
gracePeriod=30 Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.053099 4925 generic.go:334] "Generic (PLEG): container finished" podID="62f345e0-c206-45f7-91c6-67de05b87130" containerID="fcc323ff03e5bb84a07c86d43322fadefbc533649579291fb92145e8f57f3512" exitCode=0 Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.053528 4925 generic.go:334] "Generic (PLEG): container finished" podID="62f345e0-c206-45f7-91c6-67de05b87130" containerID="231ba32dd6cd6e434fabeb00c23353885ee0543925258cfa868d025e7c68fc36" exitCode=2 Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.053545 4925 generic.go:334] "Generic (PLEG): container finished" podID="62f345e0-c206-45f7-91c6-67de05b87130" containerID="cf8cb39acfd7037250c81ab49b6d4663ebb5189e957fe12406f3418be5f2009a" exitCode=0 Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.053177 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerDied","Data":"fcc323ff03e5bb84a07c86d43322fadefbc533649579291fb92145e8f57f3512"} Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.053591 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerDied","Data":"231ba32dd6cd6e434fabeb00c23353885ee0543925258cfa868d025e7c68fc36"} Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.053611 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerDied","Data":"cf8cb39acfd7037250c81ab49b6d4663ebb5189e957fe12406f3418be5f2009a"} Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.752512 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.753611 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerName="watcher-kuttl-api-log" containerID="cri-o://412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f" gracePeriod=30 Jan 21 11:23:22 crc kubenswrapper[4925]: I0121 11:23:22.804276 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerName="watcher-api" containerID="cri-o://045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352" gracePeriod=30 Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.059031 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-dqjpf" podUID="dbe9a043-a969-429b-b7b1-33d12296c52c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.83:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.089906 4925 generic.go:334] "Generic (PLEG): container finished" podID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerID="412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f" exitCode=143 Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.089971 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" 
event={"ID":"d216b681-7f33-4a4f-b938-f9cb5b01bbd2","Type":"ContainerDied","Data":"412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f"} Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.098591 4925 generic.go:334] "Generic (PLEG): container finished" podID="62f345e0-c206-45f7-91c6-67de05b87130" containerID="d2b4711af968ebceea401498b9421f51400c45250ef419f87681e94313cad62c" exitCode=0 Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.098631 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerDied","Data":"d2b4711af968ebceea401498b9421f51400c45250ef419f87681e94313cad62c"} Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.296188 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.592492 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-ceilometer-tls-certs\") pod \"62f345e0-c206-45f7-91c6-67de05b87130\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.592537 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-log-httpd\") pod \"62f345e0-c206-45f7-91c6-67de05b87130\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.592606 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-sg-core-conf-yaml\") pod \"62f345e0-c206-45f7-91c6-67de05b87130\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.592920 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-scripts\") pod \"62f345e0-c206-45f7-91c6-67de05b87130\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.592974 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-config-data\") pod \"62f345e0-c206-45f7-91c6-67de05b87130\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.592994 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-combined-ca-bundle\") pod \"62f345e0-c206-45f7-91c6-67de05b87130\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.593085 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m95pn\" (UniqueName: \"kubernetes.io/projected/62f345e0-c206-45f7-91c6-67de05b87130-kube-api-access-m95pn\") pod \"62f345e0-c206-45f7-91c6-67de05b87130\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.593110 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-run-httpd\") pod \"62f345e0-c206-45f7-91c6-67de05b87130\" (UID: \"62f345e0-c206-45f7-91c6-67de05b87130\") " Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.594149 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "62f345e0-c206-45f7-91c6-67de05b87130" (UID: "62f345e0-c206-45f7-91c6-67de05b87130"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.595703 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "62f345e0-c206-45f7-91c6-67de05b87130" (UID: "62f345e0-c206-45f7-91c6-67de05b87130"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.601765 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62f345e0-c206-45f7-91c6-67de05b87130-kube-api-access-m95pn" (OuterVolumeSpecName: "kube-api-access-m95pn") pod "62f345e0-c206-45f7-91c6-67de05b87130" (UID: "62f345e0-c206-45f7-91c6-67de05b87130"). InnerVolumeSpecName "kube-api-access-m95pn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.609772 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-scripts" (OuterVolumeSpecName: "scripts") pod "62f345e0-c206-45f7-91c6-67de05b87130" (UID: "62f345e0-c206-45f7-91c6-67de05b87130"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.656672 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "62f345e0-c206-45f7-91c6-67de05b87130" (UID: "62f345e0-c206-45f7-91c6-67de05b87130"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.695307 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.695350 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.695365 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m95pn\" (UniqueName: \"kubernetes.io/projected/62f345e0-c206-45f7-91c6-67de05b87130-kube-api-access-m95pn\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.695380 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.695408 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62f345e0-c206-45f7-91c6-67de05b87130-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.698515 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62f345e0-c206-45f7-91c6-67de05b87130" (UID: "62f345e0-c206-45f7-91c6-67de05b87130"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.705726 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "62f345e0-c206-45f7-91c6-67de05b87130" (UID: "62f345e0-c206-45f7-91c6-67de05b87130"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.787789 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-config-data" (OuterVolumeSpecName: "config-data") pod "62f345e0-c206-45f7-91c6-67de05b87130" (UID: "62f345e0-c206-45f7-91c6-67de05b87130"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.800700 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.800745 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.800758 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f345e0-c206-45f7-91c6-67de05b87130-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:24 crc kubenswrapper[4925]: I0121 11:23:24.925771 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.169108 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-custom-prometheus-ca\") pod \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.169217 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkhcb\" (UniqueName: \"kubernetes.io/projected/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-kube-api-access-qkhcb\") pod \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.169353 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-public-tls-certs\") pod \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.169558 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-logs\") pod \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.169914 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-combined-ca-bundle\") pod \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.169952 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-config-data\") pod \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.169978 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-internal-tls-certs\") pod \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\" (UID: \"d216b681-7f33-4a4f-b938-f9cb5b01bbd2\") " Jan 21 11:23:25 
crc kubenswrapper[4925]: I0121 11:23:25.178847 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-logs" (OuterVolumeSpecName: "logs") pod "d216b681-7f33-4a4f-b938-f9cb5b01bbd2" (UID: "d216b681-7f33-4a4f-b938-f9cb5b01bbd2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.183359 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-kube-api-access-qkhcb" (OuterVolumeSpecName: "kube-api-access-qkhcb") pod "d216b681-7f33-4a4f-b938-f9cb5b01bbd2" (UID: "d216b681-7f33-4a4f-b938-f9cb5b01bbd2"). InnerVolumeSpecName "kube-api-access-qkhcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.188305 4925 generic.go:334] "Generic (PLEG): container finished" podID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerID="045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352" exitCode=0 Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.188373 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"d216b681-7f33-4a4f-b938-f9cb5b01bbd2","Type":"ContainerDied","Data":"045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352"} Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.188423 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"d216b681-7f33-4a4f-b938-f9cb5b01bbd2","Type":"ContainerDied","Data":"848caac6e347b26a9030f8c75b507cb9b90aa166a7d9b016127eeb38e57d236e"} Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.188446 4925 scope.go:117] "RemoveContainer" containerID="045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.188619 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.194606 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"62f345e0-c206-45f7-91c6-67de05b87130","Type":"ContainerDied","Data":"65ce3f47db76c730e7dd7b2f61f9b54e4e81dec23628ed236b681dc00b1ad73d"} Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.194702 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.207035 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "d216b681-7f33-4a4f-b938-f9cb5b01bbd2" (UID: "d216b681-7f33-4a4f-b938-f9cb5b01bbd2"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.228507 4925 scope.go:117] "RemoveContainer" containerID="412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.245437 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.257745 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.259286 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d216b681-7f33-4a4f-b938-f9cb5b01bbd2" (UID: "d216b681-7f33-4a4f-b938-f9cb5b01bbd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.259520 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d216b681-7f33-4a4f-b938-f9cb5b01bbd2" (UID: "d216b681-7f33-4a4f-b938-f9cb5b01bbd2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.272335 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.272374 4925 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.272387 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.272422 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkhcb\" (UniqueName: \"kubernetes.io/projected/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-kube-api-access-qkhcb\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.272437 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.281671 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:23:25 crc kubenswrapper[4925]: E0121 11:23:25.282243 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="sg-core" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.282315 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="sg-core" Jan 21 11:23:25 crc kubenswrapper[4925]: E0121 11:23:25.282386 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerName="watcher-api" Jan 21 
11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.282526 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerName="watcher-api" Jan 21 11:23:25 crc kubenswrapper[4925]: E0121 11:23:25.282938 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="ceilometer-central-agent" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.283032 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="ceilometer-central-agent" Jan 21 11:23:25 crc kubenswrapper[4925]: E0121 11:23:25.283156 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerName="watcher-kuttl-api-log" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.283248 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerName="watcher-kuttl-api-log" Jan 21 11:23:25 crc kubenswrapper[4925]: E0121 11:23:25.283348 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="ceilometer-notification-agent" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.283856 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="ceilometer-notification-agent" Jan 21 11:23:25 crc kubenswrapper[4925]: E0121 11:23:25.283955 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="proxy-httpd" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.284034 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="proxy-httpd" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.284409 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="proxy-httpd" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.284692 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="sg-core" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.284811 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerName="watcher-kuttl-api-log" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.284923 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" containerName="watcher-api" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.285044 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="ceilometer-notification-agent" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.285130 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="62f345e0-c206-45f7-91c6-67de05b87130" containerName="ceilometer-central-agent" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.283744 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d216b681-7f33-4a4f-b938-f9cb5b01bbd2" (UID: "d216b681-7f33-4a4f-b938-f9cb5b01bbd2"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.288463 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.296163 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.296510 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.296757 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.303262 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.311581 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-config-data" (OuterVolumeSpecName: "config-data") pod "d216b681-7f33-4a4f-b938-f9cb5b01bbd2" (UID: "d216b681-7f33-4a4f-b938-f9cb5b01bbd2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.361685 4925 scope.go:117] "RemoveContainer" containerID="045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352" Jan 21 11:23:25 crc kubenswrapper[4925]: E0121 11:23:25.362325 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352\": container with ID starting with 045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352 not found: ID does not exist" containerID="045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.362374 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352"} err="failed to get container status \"045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352\": rpc error: code = NotFound desc = could not find container \"045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352\": container with ID starting with 045018a5f18755a3f1165c746d00d34e6406390dc811aeab2689bd5d10820352 not found: ID does not exist" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.362445 4925 scope.go:117] "RemoveContainer" containerID="412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f" Jan 21 11:23:25 crc kubenswrapper[4925]: E0121 11:23:25.362926 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f\": container with ID starting with 412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f not found: ID does not exist" containerID="412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.362965 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f"} err="failed to get container status 
\"412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f\": rpc error: code = NotFound desc = could not find container \"412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f\": container with ID starting with 412dd5380a0b9f44ee5fe3f7007a83bc0de409e7e496aa40291ee31f42d2790f not found: ID does not exist" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.362990 4925 scope.go:117] "RemoveContainer" containerID="fcc323ff03e5bb84a07c86d43322fadefbc533649579291fb92145e8f57f3512" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.373934 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-log-httpd\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374032 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-config-data\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374069 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q58h4\" (UniqueName: \"kubernetes.io/projected/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-kube-api-access-q58h4\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374096 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374144 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374206 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-scripts\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374241 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-run-httpd\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374269 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " 
pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374326 4925 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.374337 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d216b681-7f33-4a4f-b938-f9cb5b01bbd2-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.386633 4925 scope.go:117] "RemoveContainer" containerID="231ba32dd6cd6e434fabeb00c23353885ee0543925258cfa868d025e7c68fc36" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.415026 4925 scope.go:117] "RemoveContainer" containerID="d2b4711af968ebceea401498b9421f51400c45250ef419f87681e94313cad62c" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.445448 4925 scope.go:117] "RemoveContainer" containerID="cf8cb39acfd7037250c81ab49b6d4663ebb5189e957fe12406f3418be5f2009a" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.476233 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-run-httpd\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.476711 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.476934 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-log-httpd\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.477144 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-run-httpd\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.477520 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-log-httpd\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.477986 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-config-data\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.479264 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q58h4\" (UniqueName: \"kubernetes.io/projected/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-kube-api-access-q58h4\") pod \"ceilometer-0\" 
(UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.479462 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.480087 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.480368 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-scripts\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.484290 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.484480 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.484672 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-config-data\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.484932 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.487893 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-scripts\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.509301 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q58h4\" (UniqueName: \"kubernetes.io/projected/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-kube-api-access-q58h4\") pod \"ceilometer-0\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.528104 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62f345e0-c206-45f7-91c6-67de05b87130" 
path="/var/lib/kubelet/pods/62f345e0-c206-45f7-91c6-67de05b87130/volumes" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.588477 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.595045 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.617405 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.619041 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.622042 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.622447 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.622659 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.665357 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.665636 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.685789 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912a7d7a-2aa2-4982-bfc9-f133216441a4-logs\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.685910 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.685954 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.685993 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.686071 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-public-tls-certs\") pod \"watcher-kuttl-api-0\" 
(UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.702096 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-795mq\" (UniqueName: \"kubernetes.io/projected/912a7d7a-2aa2-4982-bfc9-f133216441a4-kube-api-access-795mq\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.702223 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.804498 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.804590 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.804670 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.804891 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-795mq\" (UniqueName: \"kubernetes.io/projected/912a7d7a-2aa2-4982-bfc9-f133216441a4-kube-api-access-795mq\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.804966 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.805023 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912a7d7a-2aa2-4982-bfc9-f133216441a4-logs\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.805123 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: 
\"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.806010 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912a7d7a-2aa2-4982-bfc9-f133216441a4-logs\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.810384 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.810855 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.811199 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.811383 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.812850 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.830691 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-795mq\" (UniqueName: \"kubernetes.io/projected/912a7d7a-2aa2-4982-bfc9-f133216441a4-kube-api-access-795mq\") pod \"watcher-kuttl-api-0\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:25 crc kubenswrapper[4925]: I0121 11:23:25.947009 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:26 crc kubenswrapper[4925]: I0121 11:23:26.179761 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:23:26 crc kubenswrapper[4925]: I0121 11:23:26.226843 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerStarted","Data":"661d8741744191629c6755b8975b74116de9d3ed33eed967ea17c798ee328076"} Jan 21 11:23:26 crc kubenswrapper[4925]: I0121 11:23:26.603293 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:27 crc kubenswrapper[4925]: I0121 11:23:27.308902 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"912a7d7a-2aa2-4982-bfc9-f133216441a4","Type":"ContainerStarted","Data":"1fc6a08332f0e5e6ce65ae5292fe8e83d1f69e2a30bcecd4b22097924c886dbc"} Jan 21 11:23:27 crc kubenswrapper[4925]: I0121 11:23:27.309329 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"912a7d7a-2aa2-4982-bfc9-f133216441a4","Type":"ContainerStarted","Data":"c44c0c3a95d36f58acc5dde5eb225d46fd2973c388697c9450c1dd95681e348b"} Jan 21 11:23:27 crc kubenswrapper[4925]: I0121 11:23:27.309344 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"912a7d7a-2aa2-4982-bfc9-f133216441a4","Type":"ContainerStarted","Data":"dd3d7429817e8ef513b53a9c1d35018e182ff35324f84594f8a55c6dfc502c04"} Jan 21 11:23:27 crc kubenswrapper[4925]: I0121 11:23:27.310913 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.144:9322/\": dial tcp 10.217.0.144:9322: connect: connection refused" Jan 21 11:23:27 crc kubenswrapper[4925]: I0121 11:23:27.310972 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:27 crc kubenswrapper[4925]: I0121 11:23:27.347545 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.347518174 podStartE2EDuration="2.347518174s" podCreationTimestamp="2026-01-21 11:23:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:23:27.342112372 +0000 UTC m=+1698.946004316" watchObservedRunningTime="2026-01-21 11:23:27.347518174 +0000 UTC m=+1698.951410108" Jan 21 11:23:27 crc kubenswrapper[4925]: I0121 11:23:27.514765 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d216b681-7f33-4a4f-b938-f9cb5b01bbd2" path="/var/lib/kubelet/pods/d216b681-7f33-4a4f-b938-f9cb5b01bbd2/volumes" Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.319322 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerStarted","Data":"f7a85a05bcc110952d949dac4bacc35fb05ae2315addb13ff5f987de0ba87c7c"} Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.472686 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj"] Jan 21 11:23:28 crc 
kubenswrapper[4925]: I0121 11:23:28.481812 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-ss2lj"] Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.567058 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.567966 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" containerName="watcher-applier" containerID="cri-o://d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60" gracePeriod=30 Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.581549 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher48bb-account-delete-cf8zp"] Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.583148 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.609033 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.629988 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher48bb-account-delete-cf8zp"] Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.654190 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd5bd4f3-1c98-439d-bdf6-e04460748efd-operator-scripts\") pod \"watcher48bb-account-delete-cf8zp\" (UID: \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\") " pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.654289 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjx9t\" (UniqueName: \"kubernetes.io/projected/fd5bd4f3-1c98-439d-bdf6-e04460748efd-kube-api-access-gjx9t\") pod \"watcher48bb-account-delete-cf8zp\" (UID: \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\") " pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.690930 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.691167 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="a6221ad8-c88f-4c28-a38d-21182311acf0" containerName="watcher-decision-engine" containerID="cri-o://1d17c6c47bb7c2fcb14c3724231352074d0748b243de7a41a6f4d20f5cc226fa" gracePeriod=30 Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.755974 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd5bd4f3-1c98-439d-bdf6-e04460748efd-operator-scripts\") pod \"watcher48bb-account-delete-cf8zp\" (UID: \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\") " pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.756059 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjx9t\" (UniqueName: \"kubernetes.io/projected/fd5bd4f3-1c98-439d-bdf6-e04460748efd-kube-api-access-gjx9t\") pod 
\"watcher48bb-account-delete-cf8zp\" (UID: \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\") " pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.757387 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd5bd4f3-1c98-439d-bdf6-e04460748efd-operator-scripts\") pod \"watcher48bb-account-delete-cf8zp\" (UID: \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\") " pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.791936 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjx9t\" (UniqueName: \"kubernetes.io/projected/fd5bd4f3-1c98-439d-bdf6-e04460748efd-kube-api-access-gjx9t\") pod \"watcher48bb-account-delete-cf8zp\" (UID: \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\") " pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:28 crc kubenswrapper[4925]: I0121 11:23:28.917242 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:29 crc kubenswrapper[4925]: I0121 11:23:29.344530 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher48bb-account-delete-cf8zp"] Jan 21 11:23:29 crc kubenswrapper[4925]: I0121 11:23:29.484907 4925 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="watcher-kuttl-default/watcher-kuttl-api-0" secret="" err="secret \"watcher-watcher-kuttl-dockercfg-pj7kw\" not found" Jan 21 11:23:29 crc kubenswrapper[4925]: I0121 11:23:29.485646 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerStarted","Data":"d33c9920ef63aa7736af457f478191e504558679814f723037691bea367c8fee"} Jan 21 11:23:29 crc kubenswrapper[4925]: I0121 11:23:29.485688 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerStarted","Data":"cc5a78dcdbe4ad6eafbb257a5d6a6fedd4fe804507ad41f7537fd5d529fa294e"} Jan 21 11:23:29 crc kubenswrapper[4925]: I0121 11:23:29.554103 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da55624c-e330-4a8c-b9a5-b07d40ff06ff" path="/var/lib/kubelet/pods/da55624c-e330-4a8c-b9a5-b07d40ff06ff/volumes" Jan 21 11:23:29 crc kubenswrapper[4925]: E0121 11:23:29.590433 4925 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-api-config-data: secret "watcher-kuttl-api-config-data" not found Jan 21 11:23:29 crc kubenswrapper[4925]: E0121 11:23:29.590574 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data podName:912a7d7a-2aa2-4982-bfc9-f133216441a4 nodeName:}" failed. No retries permitted until 2026-01-21 11:23:30.090535121 +0000 UTC m=+1701.694427065 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data") pod "watcher-kuttl-api-0" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4") : secret "watcher-kuttl-api-config-data" not found Jan 21 11:23:30 crc kubenswrapper[4925]: E0121 11:23:30.098898 4925 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-api-config-data: secret "watcher-kuttl-api-config-data" not found Jan 21 11:23:30 crc kubenswrapper[4925]: E0121 11:23:30.099024 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data podName:912a7d7a-2aa2-4982-bfc9-f133216441a4 nodeName:}" failed. No retries permitted until 2026-01-21 11:23:31.099002479 +0000 UTC m=+1702.702894413 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data") pod "watcher-kuttl-api-0" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4") : secret "watcher-kuttl-api-config-data" not found Jan 21 11:23:30 crc kubenswrapper[4925]: I0121 11:23:30.505233 4925 generic.go:334] "Generic (PLEG): container finished" podID="fd5bd4f3-1c98-439d-bdf6-e04460748efd" containerID="13f4a0ae49b8c0a72a8d5c2931975781ef05f02fed019d43463e851e64cc0acd" exitCode=0 Jan 21 11:23:30 crc kubenswrapper[4925]: I0121 11:23:30.505670 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:23:30 crc kubenswrapper[4925]: I0121 11:23:30.505807 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" event={"ID":"fd5bd4f3-1c98-439d-bdf6-e04460748efd","Type":"ContainerDied","Data":"13f4a0ae49b8c0a72a8d5c2931975781ef05f02fed019d43463e851e64cc0acd"} Jan 21 11:23:30 crc kubenswrapper[4925]: I0121 11:23:30.505859 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" event={"ID":"fd5bd4f3-1c98-439d-bdf6-e04460748efd","Type":"ContainerStarted","Data":"7b8644956819ba946b48d01b8fdcdde0a89be527891d17db9c9850844042b769"} Jan 21 11:23:30 crc kubenswrapper[4925]: I0121 11:23:30.505855 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-kuttl-api-log" containerID="cri-o://c44c0c3a95d36f58acc5dde5eb225d46fd2973c388697c9450c1dd95681e348b" gracePeriod=30 Jan 21 11:23:30 crc kubenswrapper[4925]: I0121 11:23:30.505913 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" containerID="cri-o://1fc6a08332f0e5e6ce65ae5292fe8e83d1f69e2a30bcecd4b22097924c886dbc" gracePeriod=30 Jan 21 11:23:30 crc kubenswrapper[4925]: I0121 11:23:30.517751 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.144:9322/\": EOF" Jan 21 11:23:30 crc kubenswrapper[4925]: I0121 11:23:30.948662 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:31 crc kubenswrapper[4925]: E0121 11:23:31.116833 4925 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-api-config-data: 
secret "watcher-kuttl-api-config-data" not found Jan 21 11:23:31 crc kubenswrapper[4925]: E0121 11:23:31.117251 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data podName:912a7d7a-2aa2-4982-bfc9-f133216441a4 nodeName:}" failed. No retries permitted until 2026-01-21 11:23:33.117229076 +0000 UTC m=+1704.721121010 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data") pod "watcher-kuttl-api-0" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4") : secret "watcher-kuttl-api-config-data" not found Jan 21 11:23:31 crc kubenswrapper[4925]: E0121 11:23:31.174869 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60 is running failed: container process not found" containerID="d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:23:31 crc kubenswrapper[4925]: E0121 11:23:31.175690 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60 is running failed: container process not found" containerID="d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:23:31 crc kubenswrapper[4925]: E0121 11:23:31.176183 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60 is running failed: container process not found" containerID="d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:23:31 crc kubenswrapper[4925]: E0121 11:23:31.176245 4925 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60 is running failed: container process not found" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" containerName="watcher-applier" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.572589 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerStarted","Data":"4dd83395868546a0e767a44b647393400c165be6f456038d425951e28db92dcc"} Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.572933 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.583827 4925 generic.go:334] "Generic (PLEG): container finished" podID="bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" containerID="d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60" exitCode=0 Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.583939 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" 
event={"ID":"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56","Type":"ContainerDied","Data":"d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60"} Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.587219 4925 generic.go:334] "Generic (PLEG): container finished" podID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerID="c44c0c3a95d36f58acc5dde5eb225d46fd2973c388697c9450c1dd95681e348b" exitCode=143 Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.587507 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"912a7d7a-2aa2-4982-bfc9-f133216441a4","Type":"ContainerDied","Data":"c44c0c3a95d36f58acc5dde5eb225d46fd2973c388697c9450c1dd95681e348b"} Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.707036 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.726461 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.428406684 podStartE2EDuration="6.726440285s" podCreationTimestamp="2026-01-21 11:23:25 +0000 UTC" firstStartedPulling="2026-01-21 11:23:26.191902161 +0000 UTC m=+1697.795794095" lastFinishedPulling="2026-01-21 11:23:30.489935762 +0000 UTC m=+1702.093827696" observedRunningTime="2026-01-21 11:23:31.607994701 +0000 UTC m=+1703.211886655" watchObservedRunningTime="2026-01-21 11:23:31.726440285 +0000 UTC m=+1703.330332219" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.833998 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-logs\") pod \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.834427 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-logs" (OuterVolumeSpecName: "logs") pod "bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" (UID: "bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.834567 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjfpw\" (UniqueName: \"kubernetes.io/projected/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-kube-api-access-tjfpw\") pod \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.834753 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-combined-ca-bundle\") pod \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.834794 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-config-data\") pod \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\" (UID: \"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56\") " Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.835581 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.844601 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-kube-api-access-tjfpw" (OuterVolumeSpecName: "kube-api-access-tjfpw") pod "bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" (UID: "bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56"). InnerVolumeSpecName "kube-api-access-tjfpw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.879726 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" (UID: "bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.940203 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjfpw\" (UniqueName: \"kubernetes.io/projected/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-kube-api-access-tjfpw\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.940239 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:31 crc kubenswrapper[4925]: I0121 11:23:31.954708 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-config-data" (OuterVolumeSpecName: "config-data") pod "bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" (UID: "bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.044724 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.163520 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.249078 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd5bd4f3-1c98-439d-bdf6-e04460748efd-operator-scripts\") pod \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\" (UID: \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\") " Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.249328 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjx9t\" (UniqueName: \"kubernetes.io/projected/fd5bd4f3-1c98-439d-bdf6-e04460748efd-kube-api-access-gjx9t\") pod \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\" (UID: \"fd5bd4f3-1c98-439d-bdf6-e04460748efd\") " Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.250867 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd5bd4f3-1c98-439d-bdf6-e04460748efd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fd5bd4f3-1c98-439d-bdf6-e04460748efd" (UID: "fd5bd4f3-1c98-439d-bdf6-e04460748efd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.419569 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd5bd4f3-1c98-439d-bdf6-e04460748efd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.423193 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd5bd4f3-1c98-439d-bdf6-e04460748efd-kube-api-access-gjx9t" (OuterVolumeSpecName: "kube-api-access-gjx9t") pod "fd5bd4f3-1c98-439d-bdf6-e04460748efd" (UID: "fd5bd4f3-1c98-439d-bdf6-e04460748efd"). InnerVolumeSpecName "kube-api-access-gjx9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.522058 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjx9t\" (UniqueName: \"kubernetes.io/projected/fd5bd4f3-1c98-439d-bdf6-e04460748efd-kube-api-access-gjx9t\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.598274 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56","Type":"ContainerDied","Data":"e6da232322ec7060da1c1ccbf46312098c077683670781750f300e61da8f203a"} Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.598339 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.598505 4925 scope.go:117] "RemoveContainer" containerID="d31440fd2570d0eb64c7fb5ec9474773fb862cea3693bff078a533cd68d26a60" Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.603263 4925 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.611691 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher48bb-account-delete-cf8zp" event={"ID":"fd5bd4f3-1c98-439d-bdf6-e04460748efd","Type":"ContainerDied","Data":"7b8644956819ba946b48d01b8fdcdde0a89be527891d17db9c9850844042b769"}
Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.611758 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b8644956819ba946b48d01b8fdcdde0a89be527891d17db9c9850844042b769"
Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.662032 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:23:32 crc kubenswrapper[4925]: I0121 11:23:32.671664 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:23:33 crc kubenswrapper[4925]: E0121 11:23:33.138813 4925 secret.go:188] Couldn't get secret watcher-kuttl-default/watcher-kuttl-api-config-data: secret "watcher-kuttl-api-config-data" not found
Jan 21 11:23:33 crc kubenswrapper[4925]: E0121 11:23:33.139209 4925 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data podName:912a7d7a-2aa2-4982-bfc9-f133216441a4 nodeName:}" failed. No retries permitted until 2026-01-21 11:23:37.139187568 +0000 UTC m=+1708.743079502 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data") pod "watcher-kuttl-api-0" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4") : secret "watcher-kuttl-api-config-data" not found
Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.523308 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" path="/var/lib/kubelet/pods/bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56/volumes"
Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.523994 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.612682 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="ceilometer-central-agent" containerID="cri-o://f7a85a05bcc110952d949dac4bacc35fb05ae2315addb13ff5f987de0ba87c7c" gracePeriod=30
Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.613296 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="proxy-httpd" containerID="cri-o://4dd83395868546a0e767a44b647393400c165be6f456038d425951e28db92dcc" gracePeriod=30
Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.613422 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="sg-core" containerID="cri-o://d33c9920ef63aa7736af457f478191e504558679814f723037691bea367c8fee" gracePeriod=30
Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.613519 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="ceilometer-notification-agent" containerID="cri-o://cc5a78dcdbe4ad6eafbb257a5d6a6fedd4fe804507ad41f7537fd5d529fa294e" gracePeriod=30
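The nestedpendingoperations entry above shows the kubelet's per-volume exponential backoff: after the secret lookup fails, the MountVolume.SetUp operation may not be retried for 4s, and the window grows on each consecutive failure up to a cap. A sketch of that policy shape (constants are illustrative assumptions, not the kubelet's exact values):

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoff tracks the retry window for one pending operation.
    type backoff struct {
    	delay time.Duration // current durationBeforeRetry
    	next  time.Time     // "No retries permitted until" this instant
    }

    // fail records a failure, doubling the delay up to max.
    func (b *backoff) fail(now time.Time, max time.Duration) {
    	if b.delay == 0 {
    		b.delay = 500 * time.Millisecond
    	} else if b.delay *= 2; b.delay > max {
    		b.delay = max
    	}
    	b.next = now.Add(b.delay)
    }

    // allowed reports whether a retry is permitted yet.
    func (b *backoff) allowed(now time.Time) bool { return !now.Before(b.next) }

    func main() {
    	var b backoff
    	now := time.Now()
    	for i := 1; i <= 4; i++ { // 500ms, 1s, 2s, 4s ...
    		b.fail(now, 2*time.Minute)
    		fmt.Printf("failure %d: no retries permitted for %v (allowed now: %v)\n",
    			i, b.delay, b.allowed(now))
    	}
    }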
containerName="ceilometer-notification-agent" containerID="cri-o://cc5a78dcdbe4ad6eafbb257a5d6a6fedd4fe804507ad41f7537fd5d529fa294e" gracePeriod=30 Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.614486 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-h95vt"] Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.625952 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-h95vt"] Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.648488 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher48bb-account-delete-cf8zp"] Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.663983 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher48bb-account-delete-cf8zp"] Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.675166 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-48bb-account-create-update-g87td"] Jan 21 11:23:33 crc kubenswrapper[4925]: I0121 11:23:33.688124 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-48bb-account-create-update-g87td"] Jan 21 11:23:34 crc kubenswrapper[4925]: I0121 11:23:34.632913 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.144:9322/\": read tcp 10.217.0.2:49618->10.217.0.144:9322: read: connection reset by peer" Jan 21 11:23:34 crc kubenswrapper[4925]: I0121 11:23:34.635214 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.144:9322/\": dial tcp 10.217.0.144:9322: connect: connection refused" Jan 21 11:23:34 crc kubenswrapper[4925]: I0121 11:23:34.673866 4925 generic.go:334] "Generic (PLEG): container finished" podID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerID="d33c9920ef63aa7736af457f478191e504558679814f723037691bea367c8fee" exitCode=2 Jan 21 11:23:34 crc kubenswrapper[4925]: I0121 11:23:34.673922 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerDied","Data":"d33c9920ef63aa7736af457f478191e504558679814f723037691bea367c8fee"} Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.549260 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="075d7c44-b7c5-4883-8e7a-b8d2036edf88" path="/var/lib/kubelet/pods/075d7c44-b7c5-4883-8e7a-b8d2036edf88/volumes" Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.550124 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9162a7aa-c49a-4d3c-90cc-6a504448ffe2" path="/var/lib/kubelet/pods/9162a7aa-c49a-4d3c-90cc-6a504448ffe2/volumes" Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.550754 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd5bd4f3-1c98-439d-bdf6-e04460748efd" path="/var/lib/kubelet/pods/fd5bd4f3-1c98-439d-bdf6-e04460748efd/volumes" Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.698603 4925 generic.go:334] "Generic (PLEG): container finished" podID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerID="1fc6a08332f0e5e6ce65ae5292fe8e83d1f69e2a30bcecd4b22097924c886dbc" exitCode=0 Jan 21 
Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.705641 4925 generic.go:334] "Generic (PLEG): container finished" podID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerID="4dd83395868546a0e767a44b647393400c165be6f456038d425951e28db92dcc" exitCode=0
Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.705688 4925 generic.go:334] "Generic (PLEG): container finished" podID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerID="cc5a78dcdbe4ad6eafbb257a5d6a6fedd4fe804507ad41f7537fd5d529fa294e" exitCode=0
Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.705699 4925 generic.go:334] "Generic (PLEG): container finished" podID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerID="f7a85a05bcc110952d949dac4bacc35fb05ae2315addb13ff5f987de0ba87c7c" exitCode=0
Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.705724 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerDied","Data":"4dd83395868546a0e767a44b647393400c165be6f456038d425951e28db92dcc"}
Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.705768 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerDied","Data":"cc5a78dcdbe4ad6eafbb257a5d6a6fedd4fe804507ad41f7537fd5d529fa294e"}
Jan 21 11:23:35 crc kubenswrapper[4925]: I0121 11:23:35.705782 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerDied","Data":"f7a85a05bcc110952d949dac4bacc35fb05ae2315addb13ff5f987de0ba87c7c"}
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.096947 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.104529 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:23:36 crc kubenswrapper[4925]: E0121 11:23:36.108169 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d17c6c47bb7c2fcb14c3724231352074d0748b243de7a41a6f4d20f5cc226fa" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Jan 21 11:23:36 crc kubenswrapper[4925]: E0121 11:23:36.114539 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d17c6c47bb7c2fcb14c3724231352074d0748b243de7a41a6f4d20f5cc226fa" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Jan 21 11:23:36 crc kubenswrapper[4925]: E0121 11:23:36.120955 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d17c6c47bb7c2fcb14c3724231352074d0748b243de7a41a6f4d20f5cc226fa" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Jan 21 11:23:36 crc kubenswrapper[4925]: E0121 11:23:36.121063 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="a6221ad8-c88f-4c28-a38d-21182311acf0" containerName="watcher-decision-engine"
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.280295 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912a7d7a-2aa2-4982-bfc9-f133216441a4-logs\") pod \"912a7d7a-2aa2-4982-bfc9-f133216441a4\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") "
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.280685 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-795mq\" (UniqueName: \"kubernetes.io/projected/912a7d7a-2aa2-4982-bfc9-f133216441a4-kube-api-access-795mq\") pod \"912a7d7a-2aa2-4982-bfc9-f133216441a4\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") "
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.280764 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-internal-tls-certs\") pod \"912a7d7a-2aa2-4982-bfc9-f133216441a4\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") "
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.280861 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-public-tls-certs\") pod \"912a7d7a-2aa2-4982-bfc9-f133216441a4\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") "
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.280885 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-config-data\") pod \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") "
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.280912 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-custom-prometheus-ca\") pod \"912a7d7a-2aa2-4982-bfc9-f133216441a4\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") "
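The ExecSync failures above come from an exec readiness probe that runs pgrep inside the container; CRI-O rejects the exec because the container is already stopping. The probe itself is just an exit-code check: 0 means a matching process exists. A local sketch of the same check (run directly rather than through CRI, for illustration only):

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    // ready runs the same pgrep check the probe uses: -f matches the full
    // command line, and -r DRST restricts matches to processes in state
    // D, R, S, or T. Exit code 0 (a match) is treated as ready.
    func ready() bool {
    	cmd := exec.Command("/usr/bin/pgrep", "-f", "-r", "DRST", "watcher-decision-engine")
    	return cmd.Run() == nil // nil error means exit code 0
    }

    func main() {
    	fmt.Println("ready:", ready())
    }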
"operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-custom-prometheus-ca\") pod \"912a7d7a-2aa2-4982-bfc9-f133216441a4\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.280945 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-ceilometer-tls-certs\") pod \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.280995 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-log-httpd\") pod \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.281019 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-combined-ca-bundle\") pod \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.281041 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-combined-ca-bundle\") pod \"912a7d7a-2aa2-4982-bfc9-f133216441a4\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.281107 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q58h4\" (UniqueName: \"kubernetes.io/projected/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-kube-api-access-q58h4\") pod \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.281131 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data\") pod \"912a7d7a-2aa2-4982-bfc9-f133216441a4\" (UID: \"912a7d7a-2aa2-4982-bfc9-f133216441a4\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.281190 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-sg-core-conf-yaml\") pod \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.281210 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-run-httpd\") pod \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.281230 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-scripts\") pod \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\" (UID: \"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8\") " Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.281341 4925 operation_generator.go:803] 
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.282411 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912a7d7a-2aa2-4982-bfc9-f133216441a4-logs\") on node \"crc\" DevicePath \"\""
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.287715 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/912a7d7a-2aa2-4982-bfc9-f133216441a4-kube-api-access-795mq" (OuterVolumeSpecName: "kube-api-access-795mq") pod "912a7d7a-2aa2-4982-bfc9-f133216441a4" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4"). InnerVolumeSpecName "kube-api-access-795mq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.288126 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" (UID: "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.288461 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" (UID: "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.306757 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-kube-api-access-q58h4" (OuterVolumeSpecName: "kube-api-access-q58h4") pod "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" (UID: "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8"). InnerVolumeSpecName "kube-api-access-q58h4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.312207 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-scripts" (OuterVolumeSpecName: "scripts") pod "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" (UID: "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.315299 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "912a7d7a-2aa2-4982-bfc9-f133216441a4" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.317423 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" (UID: "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.319480 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "912a7d7a-2aa2-4982-bfc9-f133216441a4" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.337216 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "912a7d7a-2aa2-4982-bfc9-f133216441a4" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.338757 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "912a7d7a-2aa2-4982-bfc9-f133216441a4" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.359004 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data" (OuterVolumeSpecName: "config-data") pod "912a7d7a-2aa2-4982-bfc9-f133216441a4" (UID: "912a7d7a-2aa2-4982-bfc9-f133216441a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.369624 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" (UID: "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.370034 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" (UID: "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.383811 4925 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384417 4925 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384449 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384464 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384479 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384491 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384503 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384516 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q58h4\" (UniqueName: \"kubernetes.io/projected/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-kube-api-access-q58h4\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384530 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912a7d7a-2aa2-4982-bfc9-f133216441a4-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384541 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384552 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384563 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.384575 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-795mq\" (UniqueName: \"kubernetes.io/projected/912a7d7a-2aa2-4982-bfc9-f133216441a4-kube-api-access-795mq\") on node \"crc\" DevicePath 
\"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.408328 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-config-data" (OuterVolumeSpecName: "config-data") pod "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" (UID: "c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.487154 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.964529 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.964706 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8","Type":"ContainerDied","Data":"661d8741744191629c6755b8975b74116de9d3ed33eed967ea17c798ee328076"} Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.964775 4925 scope.go:117] "RemoveContainer" containerID="4dd83395868546a0e767a44b647393400c165be6f456038d425951e28db92dcc" Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.969335 4925 generic.go:334] "Generic (PLEG): container finished" podID="a6221ad8-c88f-4c28-a38d-21182311acf0" containerID="1d17c6c47bb7c2fcb14c3724231352074d0748b243de7a41a6f4d20f5cc226fa" exitCode=0 Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.969406 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"a6221ad8-c88f-4c28-a38d-21182311acf0","Type":"ContainerDied","Data":"1d17c6c47bb7c2fcb14c3724231352074d0748b243de7a41a6f4d20f5cc226fa"} Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.981793 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"912a7d7a-2aa2-4982-bfc9-f133216441a4","Type":"ContainerDied","Data":"dd3d7429817e8ef513b53a9c1d35018e182ff35324f84594f8a55c6dfc502c04"} Jan 21 11:23:36 crc kubenswrapper[4925]: I0121 11:23:36.982005 4925 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.011758 4925 scope.go:117] "RemoveContainer" containerID="d33c9920ef63aa7736af457f478191e504558679814f723037691bea367c8fee"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.077319 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.087023 4925 scope.go:117] "RemoveContainer" containerID="cc5a78dcdbe4ad6eafbb257a5d6a6fedd4fe804507ad41f7537fd5d529fa294e"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.089624 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.098015 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.110338 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123316 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:23:37 crc kubenswrapper[4925]: E0121 11:23:37.123823 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="ceilometer-central-agent"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123837 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="ceilometer-central-agent"
Jan 21 11:23:37 crc kubenswrapper[4925]: E0121 11:23:37.123849 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="proxy-httpd"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123855 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="proxy-httpd"
Jan 21 11:23:37 crc kubenswrapper[4925]: E0121 11:23:37.123863 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="sg-core"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123869 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="sg-core"
Jan 21 11:23:37 crc kubenswrapper[4925]: E0121 11:23:37.123886 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-kuttl-api-log"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123892 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-kuttl-api-log"
Jan 21 11:23:37 crc kubenswrapper[4925]: E0121 11:23:37.123910 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" containerName="watcher-applier"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123916 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" containerName="watcher-applier"
Jan 21 11:23:37 crc kubenswrapper[4925]: E0121 11:23:37.123932 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd5bd4f3-1c98-439d-bdf6-e04460748efd" containerName="mariadb-account-delete"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123939 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd5bd4f3-1c98-439d-bdf6-e04460748efd" containerName="mariadb-account-delete"
assignment" podUID="fd5bd4f3-1c98-439d-bdf6-e04460748efd" containerName="mariadb-account-delete" Jan 21 11:23:37 crc kubenswrapper[4925]: E0121 11:23:37.123952 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123958 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" Jan 21 11:23:37 crc kubenswrapper[4925]: E0121 11:23:37.123971 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="ceilometer-notification-agent" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.123977 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="ceilometer-notification-agent" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.124164 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.124177 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="ceilometer-central-agent" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.124189 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="ceilometer-notification-agent" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.124200 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9f0f20-6fdd-45b6-b5b5-0f26c1fedc56" containerName="watcher-applier" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.124211 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="sg-core" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.124218 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd5bd4f3-1c98-439d-bdf6-e04460748efd" containerName="mariadb-account-delete" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.124224 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-kuttl-api-log" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.124233 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" containerName="proxy-httpd" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.125793 4925 util.go:30] "No sandbox for pod can be found. 
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.127591 4925 scope.go:117] "RemoveContainer" containerID="f7a85a05bcc110952d949dac4bacc35fb05ae2315addb13ff5f987de0ba87c7c"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.128720 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.128798 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.129034 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.133078 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.167326 4925 scope.go:117] "RemoveContainer" containerID="1fc6a08332f0e5e6ce65ae5292fe8e83d1f69e2a30bcecd4b22097924c886dbc"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.192370 4925 scope.go:117] "RemoveContainer" containerID="c44c0c3a95d36f58acc5dde5eb225d46fd2973c388697c9450c1dd95681e348b"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.264129 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-log-httpd\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.264283 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45b5n\" (UniqueName: \"kubernetes.io/projected/f348095e-bcd6-41fd-9ef7-f1836535f7e3-kube-api-access-45b5n\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.264857 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.264933 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-scripts\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.265068 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-config-data\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.265099 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0"
\"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.265190 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-run-httpd\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.265235 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.366989 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45b5n\" (UniqueName: \"kubernetes.io/projected/f348095e-bcd6-41fd-9ef7-f1836535f7e3-kube-api-access-45b5n\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.367070 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.367125 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-scripts\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.367200 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-config-data\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.367223 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.367283 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-run-httpd\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.367314 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.367444 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-log-httpd\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.369303 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-run-httpd\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.369322 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-log-httpd\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.372169 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.372684 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.372721 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.373655 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-config-data\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.374598 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-scripts\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.383205 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45b5n\" (UniqueName: \"kubernetes.io/projected/f348095e-bcd6-41fd-9ef7-f1836535f7e3-kube-api-access-45b5n\") pod \"ceilometer-0\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.456803 4925 util.go:30] "No sandbox for pod can be found. 
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.515305 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" path="/var/lib/kubelet/pods/912a7d7a-2aa2-4982-bfc9-f133216441a4/volumes"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.516166 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8" path="/var/lib/kubelet/pods/c8501cb9-f0b5-40c0-a6ce-5ffe3c2e58d8/volumes"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.559149 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.838595 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6221ad8-c88f-4c28-a38d-21182311acf0-logs\") pod \"a6221ad8-c88f-4c28-a38d-21182311acf0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") "
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.838804 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-combined-ca-bundle\") pod \"a6221ad8-c88f-4c28-a38d-21182311acf0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") "
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.838897 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-custom-prometheus-ca\") pod \"a6221ad8-c88f-4c28-a38d-21182311acf0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") "
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.838974 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-config-data\") pod \"a6221ad8-c88f-4c28-a38d-21182311acf0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") "
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.839017 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9vf6\" (UniqueName: \"kubernetes.io/projected/a6221ad8-c88f-4c28-a38d-21182311acf0-kube-api-access-t9vf6\") pod \"a6221ad8-c88f-4c28-a38d-21182311acf0\" (UID: \"a6221ad8-c88f-4c28-a38d-21182311acf0\") "
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.847079 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6221ad8-c88f-4c28-a38d-21182311acf0-logs" (OuterVolumeSpecName: "logs") pod "a6221ad8-c88f-4c28-a38d-21182311acf0" (UID: "a6221ad8-c88f-4c28-a38d-21182311acf0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.893197 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6221ad8-c88f-4c28-a38d-21182311acf0-kube-api-access-t9vf6" (OuterVolumeSpecName: "kube-api-access-t9vf6") pod "a6221ad8-c88f-4c28-a38d-21182311acf0" (UID: "a6221ad8-c88f-4c28-a38d-21182311acf0"). InnerVolumeSpecName "kube-api-access-t9vf6". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.911321 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6221ad8-c88f-4c28-a38d-21182311acf0" (UID: "a6221ad8-c88f-4c28-a38d-21182311acf0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.941801 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9vf6\" (UniqueName: \"kubernetes.io/projected/a6221ad8-c88f-4c28-a38d-21182311acf0-kube-api-access-t9vf6\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.941841 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6221ad8-c88f-4c28-a38d-21182311acf0-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.941878 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:37 crc kubenswrapper[4925]: I0121 11:23:37.952315 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-config-data" (OuterVolumeSpecName: "config-data") pod "a6221ad8-c88f-4c28-a38d-21182311acf0" (UID: "a6221ad8-c88f-4c28-a38d-21182311acf0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:37.998615 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "a6221ad8-c88f-4c28-a38d-21182311acf0" (UID: "a6221ad8-c88f-4c28-a38d-21182311acf0"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:38.053514 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:38.053545 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6221ad8-c88f-4c28-a38d-21182311acf0-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:38.066363 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"a6221ad8-c88f-4c28-a38d-21182311acf0","Type":"ContainerDied","Data":"a9e359a415c54a03268421816a25602c0a59dbeb092867ac5083d8830c7164e2"} Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:38.066448 4925 scope.go:117] "RemoveContainer" containerID="1d17c6c47bb7c2fcb14c3724231352074d0748b243de7a41a6f4d20f5cc226fa" Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:38.066592 4925 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:38.161488 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:38.166277 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Jan 21 11:23:38 crc kubenswrapper[4925]: I0121 11:23:38.554374 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.093256 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerStarted","Data":"1f8283fdd8414ed4829d99f0c2f2f76cf7cf42e275e2158e6c0090737309b3bd"}
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.294141 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-dxvf6"]
Jan 21 11:23:39 crc kubenswrapper[4925]: E0121 11:23:39.294835 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6221ad8-c88f-4c28-a38d-21182311acf0" containerName="watcher-decision-engine"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.294865 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6221ad8-c88f-4c28-a38d-21182311acf0" containerName="watcher-decision-engine"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.295094 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6221ad8-c88f-4c28-a38d-21182311acf0" containerName="watcher-decision-engine"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.295991 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-dxvf6"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.309597 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-9466-account-create-update-92wd2"]
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.311251 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.314800 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.326243 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-dxvf6"]
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.410015 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-9466-account-create-update-92wd2"]
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.438563 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgm82\" (UniqueName: \"kubernetes.io/projected/389e6e04-e316-4ddc-99d0-9c04f661d6b5-kube-api-access-fgm82\") pod \"watcher-db-create-dxvf6\" (UID: \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\") " pod="watcher-kuttl-default/watcher-db-create-dxvf6"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.438680 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mwhg\" (UniqueName: \"kubernetes.io/projected/eef1032c-ab9e-4ac6-934f-f7544d835d3b-kube-api-access-7mwhg\") pod \"watcher-9466-account-create-update-92wd2\" (UID: \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\") " pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.438726 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eef1032c-ab9e-4ac6-934f-f7544d835d3b-operator-scripts\") pod \"watcher-9466-account-create-update-92wd2\" (UID: \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\") " pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.438790 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/389e6e04-e316-4ddc-99d0-9c04f661d6b5-operator-scripts\") pod \"watcher-db-create-dxvf6\" (UID: \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\") " pod="watcher-kuttl-default/watcher-db-create-dxvf6"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.639981 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/389e6e04-e316-4ddc-99d0-9c04f661d6b5-operator-scripts\") pod \"watcher-db-create-dxvf6\" (UID: \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\") " pod="watcher-kuttl-default/watcher-db-create-dxvf6"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.640293 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgm82\" (UniqueName: \"kubernetes.io/projected/389e6e04-e316-4ddc-99d0-9c04f661d6b5-kube-api-access-fgm82\") pod \"watcher-db-create-dxvf6\" (UID: \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\") " pod="watcher-kuttl-default/watcher-db-create-dxvf6"
Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.640444 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mwhg\" (UniqueName: \"kubernetes.io/projected/eef1032c-ab9e-4ac6-934f-f7544d835d3b-kube-api-access-7mwhg\") pod \"watcher-9466-account-create-update-92wd2\" (UID: \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\") " pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2"
pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.640527 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eef1032c-ab9e-4ac6-934f-f7544d835d3b-operator-scripts\") pod \"watcher-9466-account-create-update-92wd2\" (UID: \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\") " pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.642560 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eef1032c-ab9e-4ac6-934f-f7544d835d3b-operator-scripts\") pod \"watcher-9466-account-create-update-92wd2\" (UID: \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\") " pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.648130 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/389e6e04-e316-4ddc-99d0-9c04f661d6b5-operator-scripts\") pod \"watcher-db-create-dxvf6\" (UID: \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\") " pod="watcher-kuttl-default/watcher-db-create-dxvf6" Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.665078 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mwhg\" (UniqueName: \"kubernetes.io/projected/eef1032c-ab9e-4ac6-934f-f7544d835d3b-kube-api-access-7mwhg\") pod \"watcher-9466-account-create-update-92wd2\" (UID: \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\") " pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.672781 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6221ad8-c88f-4c28-a38d-21182311acf0" path="/var/lib/kubelet/pods/a6221ad8-c88f-4c28-a38d-21182311acf0/volumes" Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.681009 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgm82\" (UniqueName: \"kubernetes.io/projected/389e6e04-e316-4ddc-99d0-9c04f661d6b5-kube-api-access-fgm82\") pod \"watcher-db-create-dxvf6\" (UID: \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\") " pod="watcher-kuttl-default/watcher-db-create-dxvf6" Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.705881 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" Jan 21 11:23:39 crc kubenswrapper[4925]: I0121 11:23:39.764103 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-dxvf6" Jan 21 11:23:40 crc kubenswrapper[4925]: I0121 11:23:40.137517 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerStarted","Data":"4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4"} Jan 21 11:23:40 crc kubenswrapper[4925]: I0121 11:23:40.261084 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-9466-account-create-update-92wd2"] Jan 21 11:23:40 crc kubenswrapper[4925]: I0121 11:23:40.718130 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-dxvf6"] Jan 21 11:23:40 crc kubenswrapper[4925]: W0121 11:23:40.735282 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod389e6e04_e316_4ddc_99d0_9c04f661d6b5.slice/crio-298aa0854341d617b6ba5cfaa2e0f3938eeb40ac95a1c166762d97f3ffe63a67 WatchSource:0}: Error finding container 298aa0854341d617b6ba5cfaa2e0f3938eeb40ac95a1c166762d97f3ffe63a67: Status 404 returned error can't find the container with id 298aa0854341d617b6ba5cfaa2e0f3938eeb40ac95a1c166762d97f3ffe63a67 Jan 21 11:23:41 crc kubenswrapper[4925]: I0121 11:23:41.078831 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="912a7d7a-2aa2-4982-bfc9-f133216441a4" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.144:9322/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 21 11:23:41 crc kubenswrapper[4925]: I0121 11:23:41.151545 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerStarted","Data":"334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648"} Jan 21 11:23:41 crc kubenswrapper[4925]: I0121 11:23:41.155248 4925 generic.go:334] "Generic (PLEG): container finished" podID="eef1032c-ab9e-4ac6-934f-f7544d835d3b" containerID="f161dbb0caed16e21e52b7f074e0d1c9d7c19e159c456f1571757b698d1efee6" exitCode=0 Jan 21 11:23:41 crc kubenswrapper[4925]: I0121 11:23:41.155483 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" event={"ID":"eef1032c-ab9e-4ac6-934f-f7544d835d3b","Type":"ContainerDied","Data":"f161dbb0caed16e21e52b7f074e0d1c9d7c19e159c456f1571757b698d1efee6"} Jan 21 11:23:41 crc kubenswrapper[4925]: I0121 11:23:41.155605 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" event={"ID":"eef1032c-ab9e-4ac6-934f-f7544d835d3b","Type":"ContainerStarted","Data":"b17c52a2a4785ff2a3d2c53fc606e47214b14a9844b5dd06e341893d70917bf1"} Jan 21 11:23:41 crc kubenswrapper[4925]: I0121 11:23:41.157679 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-dxvf6" event={"ID":"389e6e04-e316-4ddc-99d0-9c04f661d6b5","Type":"ContainerStarted","Data":"7632f6d38b496a4fd7cd16b8fe205f70aad7831f6cdcd0bc64e95e4954090d65"} Jan 21 11:23:41 crc kubenswrapper[4925]: I0121 11:23:41.157827 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-dxvf6" 
event={"ID":"389e6e04-e316-4ddc-99d0-9c04f661d6b5","Type":"ContainerStarted","Data":"298aa0854341d617b6ba5cfaa2e0f3938eeb40ac95a1c166762d97f3ffe63a67"} Jan 21 11:23:41 crc kubenswrapper[4925]: I0121 11:23:41.202432 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-db-create-dxvf6" podStartSLOduration=2.202386607 podStartE2EDuration="2.202386607s" podCreationTimestamp="2026-01-21 11:23:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:23:41.196944644 +0000 UTC m=+1712.800836578" watchObservedRunningTime="2026-01-21 11:23:41.202386607 +0000 UTC m=+1712.806278531" Jan 21 11:23:42 crc kubenswrapper[4925]: I0121 11:23:42.321722 4925 generic.go:334] "Generic (PLEG): container finished" podID="389e6e04-e316-4ddc-99d0-9c04f661d6b5" containerID="7632f6d38b496a4fd7cd16b8fe205f70aad7831f6cdcd0bc64e95e4954090d65" exitCode=0 Jan 21 11:23:42 crc kubenswrapper[4925]: I0121 11:23:42.322335 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-dxvf6" event={"ID":"389e6e04-e316-4ddc-99d0-9c04f661d6b5","Type":"ContainerDied","Data":"7632f6d38b496a4fd7cd16b8fe205f70aad7831f6cdcd0bc64e95e4954090d65"} Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.156098 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.192107 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eef1032c-ab9e-4ac6-934f-f7544d835d3b-operator-scripts\") pod \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\" (UID: \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\") " Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.192306 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mwhg\" (UniqueName: \"kubernetes.io/projected/eef1032c-ab9e-4ac6-934f-f7544d835d3b-kube-api-access-7mwhg\") pod \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\" (UID: \"eef1032c-ab9e-4ac6-934f-f7544d835d3b\") " Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.195855 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eef1032c-ab9e-4ac6-934f-f7544d835d3b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eef1032c-ab9e-4ac6-934f-f7544d835d3b" (UID: "eef1032c-ab9e-4ac6-934f-f7544d835d3b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.210673 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eef1032c-ab9e-4ac6-934f-f7544d835d3b-kube-api-access-7mwhg" (OuterVolumeSpecName: "kube-api-access-7mwhg") pod "eef1032c-ab9e-4ac6-934f-f7544d835d3b" (UID: "eef1032c-ab9e-4ac6-934f-f7544d835d3b"). InnerVolumeSpecName "kube-api-access-7mwhg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.294293 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eef1032c-ab9e-4ac6-934f-f7544d835d3b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.294358 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mwhg\" (UniqueName: \"kubernetes.io/projected/eef1032c-ab9e-4ac6-934f-f7544d835d3b-kube-api-access-7mwhg\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.359054 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerStarted","Data":"09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de"} Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.362960 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.362973 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-9466-account-create-update-92wd2" event={"ID":"eef1032c-ab9e-4ac6-934f-f7544d835d3b","Type":"ContainerDied","Data":"b17c52a2a4785ff2a3d2c53fc606e47214b14a9844b5dd06e341893d70917bf1"} Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.363056 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b17c52a2a4785ff2a3d2c53fc606e47214b14a9844b5dd06e341893d70917bf1" Jan 21 11:23:43 crc kubenswrapper[4925]: I0121 11:23:43.980907 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-dxvf6" Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.170142 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgm82\" (UniqueName: \"kubernetes.io/projected/389e6e04-e316-4ddc-99d0-9c04f661d6b5-kube-api-access-fgm82\") pod \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\" (UID: \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\") " Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.170341 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/389e6e04-e316-4ddc-99d0-9c04f661d6b5-operator-scripts\") pod \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\" (UID: \"389e6e04-e316-4ddc-99d0-9c04f661d6b5\") " Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.171288 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/389e6e04-e316-4ddc-99d0-9c04f661d6b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "389e6e04-e316-4ddc-99d0-9c04f661d6b5" (UID: "389e6e04-e316-4ddc-99d0-9c04f661d6b5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.171505 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/389e6e04-e316-4ddc-99d0-9c04f661d6b5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.176589 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/389e6e04-e316-4ddc-99d0-9c04f661d6b5-kube-api-access-fgm82" (OuterVolumeSpecName: "kube-api-access-fgm82") pod "389e6e04-e316-4ddc-99d0-9c04f661d6b5" (UID: "389e6e04-e316-4ddc-99d0-9c04f661d6b5"). InnerVolumeSpecName "kube-api-access-fgm82". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.275437 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgm82\" (UniqueName: \"kubernetes.io/projected/389e6e04-e316-4ddc-99d0-9c04f661d6b5-kube-api-access-fgm82\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.386599 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-dxvf6" event={"ID":"389e6e04-e316-4ddc-99d0-9c04f661d6b5","Type":"ContainerDied","Data":"298aa0854341d617b6ba5cfaa2e0f3938eeb40ac95a1c166762d97f3ffe63a67"} Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.386661 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="298aa0854341d617b6ba5cfaa2e0f3938eeb40ac95a1c166762d97f3ffe63a67" Jan 21 11:23:44 crc kubenswrapper[4925]: I0121 11:23:44.386722 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-dxvf6" Jan 21 11:23:45 crc kubenswrapper[4925]: I0121 11:23:45.397788 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerStarted","Data":"5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4"} Jan 21 11:23:45 crc kubenswrapper[4925]: I0121 11:23:45.399132 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:23:45 crc kubenswrapper[4925]: I0121 11:23:45.688245 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=3.279786669 podStartE2EDuration="8.688205595s" podCreationTimestamp="2026-01-21 11:23:37 +0000 UTC" firstStartedPulling="2026-01-21 11:23:38.573155116 +0000 UTC m=+1710.177047050" lastFinishedPulling="2026-01-21 11:23:43.981574042 +0000 UTC m=+1715.585465976" observedRunningTime="2026-01-21 11:23:45.661578579 +0000 UTC m=+1717.265470513" watchObservedRunningTime="2026-01-21 11:23:45.688205595 +0000 UTC m=+1717.292097529" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.740832 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rpbqt"] Jan 21 11:23:48 crc kubenswrapper[4925]: E0121 11:23:48.741745 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eef1032c-ab9e-4ac6-934f-f7544d835d3b" containerName="mariadb-account-create-update" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.741762 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="eef1032c-ab9e-4ac6-934f-f7544d835d3b" containerName="mariadb-account-create-update" Jan 21 11:23:48 crc kubenswrapper[4925]: E0121 
11:23:48.741779 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="389e6e04-e316-4ddc-99d0-9c04f661d6b5" containerName="mariadb-database-create" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.741785 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="389e6e04-e316-4ddc-99d0-9c04f661d6b5" containerName="mariadb-database-create" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.741940 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="389e6e04-e316-4ddc-99d0-9c04f661d6b5" containerName="mariadb-database-create" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.741957 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="eef1032c-ab9e-4ac6-934f-f7544d835d3b" containerName="mariadb-account-create-update" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.743336 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.769596 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpbqt"] Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.924873 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-catalog-content\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.925254 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzvrg\" (UniqueName: \"kubernetes.io/projected/376ba5c3-0b92-4c05-9b77-90313d272c54-kube-api-access-lzvrg\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:48 crc kubenswrapper[4925]: I0121 11:23:48.926520 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-utilities\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.028418 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzvrg\" (UniqueName: \"kubernetes.io/projected/376ba5c3-0b92-4c05-9b77-90313d272c54-kube-api-access-lzvrg\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.028556 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-utilities\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.028629 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-catalog-content\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " 
pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.029104 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-catalog-content\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.029133 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-utilities\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.052143 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzvrg\" (UniqueName: \"kubernetes.io/projected/376ba5c3-0b92-4c05-9b77-90313d272c54-kube-api-access-lzvrg\") pod \"redhat-marketplace-rpbqt\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.068879 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.760964 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd"] Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.762919 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.765337 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.765567 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-s45tj" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.783767 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd"] Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.940816 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.940910 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.944861 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-config-data\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.944972 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-db-sync-config-data\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.946127 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.946202 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b79qj\" (UniqueName: \"kubernetes.io/projected/cc9ed833-63d7-4e23-bc30-bd9cf2722903-kube-api-access-b79qj\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:49 crc kubenswrapper[4925]: I0121 11:23:49.952577 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpbqt"] Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.047236 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-db-sync-config-data\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.047359 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.047384 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b79qj\" (UniqueName: \"kubernetes.io/projected/cc9ed833-63d7-4e23-bc30-bd9cf2722903-kube-api-access-b79qj\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.047433 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-config-data\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.070443 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.070912 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" 
(UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-db-sync-config-data\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.071002 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-config-data\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.077838 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b79qj\" (UniqueName: \"kubernetes.io/projected/cc9ed833-63d7-4e23-bc30-bd9cf2722903-kube-api-access-b79qj\") pod \"watcher-kuttl-db-sync-xbbgd\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.087272 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.599370 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd"] Jan 21 11:23:50 crc kubenswrapper[4925]: W0121 11:23:50.607795 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc9ed833_63d7_4e23_bc30_bd9cf2722903.slice/crio-565efa81e9bcb0deb843be8db79be70dc6f8c63ae6716d93c5a24374c436341b WatchSource:0}: Error finding container 565efa81e9bcb0deb843be8db79be70dc6f8c63ae6716d93c5a24374c436341b: Status 404 returned error can't find the container with id 565efa81e9bcb0deb843be8db79be70dc6f8c63ae6716d93c5a24374c436341b Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.637991 4925 generic.go:334] "Generic (PLEG): container finished" podID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerID="0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a" exitCode=0 Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.638106 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpbqt" event={"ID":"376ba5c3-0b92-4c05-9b77-90313d272c54","Type":"ContainerDied","Data":"0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a"} Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.650473 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpbqt" event={"ID":"376ba5c3-0b92-4c05-9b77-90313d272c54","Type":"ContainerStarted","Data":"3f2b3fd8f245bf42a13f73e4294aa9aaf3a23b5620046af6c7ae59de1ba2d836"} Jan 21 11:23:50 crc kubenswrapper[4925]: I0121 11:23:50.661580 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" event={"ID":"cc9ed833-63d7-4e23-bc30-bd9cf2722903","Type":"ContainerStarted","Data":"565efa81e9bcb0deb843be8db79be70dc6f8c63ae6716d93c5a24374c436341b"} Jan 21 11:23:51 crc kubenswrapper[4925]: I0121 11:23:51.784794 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" event={"ID":"cc9ed833-63d7-4e23-bc30-bd9cf2722903","Type":"ContainerStarted","Data":"1c9b9fff6d436b0ebb69e9dd1b1065cc25421d575000bb067efcbd7a135f4eaa"} Jan 21 11:23:51 crc kubenswrapper[4925]: I0121 11:23:51.807905 4925 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" podStartSLOduration=2.8078861120000003 podStartE2EDuration="2.807886112s" podCreationTimestamp="2026-01-21 11:23:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:23:51.803218084 +0000 UTC m=+1723.407110028" watchObservedRunningTime="2026-01-21 11:23:51.807886112 +0000 UTC m=+1723.411778046" Jan 21 11:23:52 crc kubenswrapper[4925]: I0121 11:23:52.796787 4925 generic.go:334] "Generic (PLEG): container finished" podID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerID="bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18" exitCode=0 Jan 21 11:23:52 crc kubenswrapper[4925]: I0121 11:23:52.796847 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpbqt" event={"ID":"376ba5c3-0b92-4c05-9b77-90313d272c54","Type":"ContainerDied","Data":"bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18"} Jan 21 11:23:53 crc kubenswrapper[4925]: I0121 11:23:53.812604 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpbqt" event={"ID":"376ba5c3-0b92-4c05-9b77-90313d272c54","Type":"ContainerStarted","Data":"288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f"} Jan 21 11:23:56 crc kubenswrapper[4925]: I0121 11:23:56.150901 4925 generic.go:334] "Generic (PLEG): container finished" podID="cc9ed833-63d7-4e23-bc30-bd9cf2722903" containerID="1c9b9fff6d436b0ebb69e9dd1b1065cc25421d575000bb067efcbd7a135f4eaa" exitCode=0 Jan 21 11:23:56 crc kubenswrapper[4925]: I0121 11:23:56.151275 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" event={"ID":"cc9ed833-63d7-4e23-bc30-bd9cf2722903","Type":"ContainerDied","Data":"1c9b9fff6d436b0ebb69e9dd1b1065cc25421d575000bb067efcbd7a135f4eaa"} Jan 21 11:23:56 crc kubenswrapper[4925]: I0121 11:23:56.179297 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rpbqt" podStartSLOduration=5.463616817 podStartE2EDuration="8.179257464s" podCreationTimestamp="2026-01-21 11:23:48 +0000 UTC" firstStartedPulling="2026-01-21 11:23:50.661490083 +0000 UTC m=+1722.265382017" lastFinishedPulling="2026-01-21 11:23:53.37713073 +0000 UTC m=+1724.981022664" observedRunningTime="2026-01-21 11:23:53.839787242 +0000 UTC m=+1725.443679176" watchObservedRunningTime="2026-01-21 11:23:56.179257464 +0000 UTC m=+1727.783149398" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.566934 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.664029 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-combined-ca-bundle\") pod \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.664286 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-config-data\") pod \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.664723 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-db-sync-config-data\") pod \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.664802 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b79qj\" (UniqueName: \"kubernetes.io/projected/cc9ed833-63d7-4e23-bc30-bd9cf2722903-kube-api-access-b79qj\") pod \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\" (UID: \"cc9ed833-63d7-4e23-bc30-bd9cf2722903\") " Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.671281 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "cc9ed833-63d7-4e23-bc30-bd9cf2722903" (UID: "cc9ed833-63d7-4e23-bc30-bd9cf2722903"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.673524 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc9ed833-63d7-4e23-bc30-bd9cf2722903-kube-api-access-b79qj" (OuterVolumeSpecName: "kube-api-access-b79qj") pod "cc9ed833-63d7-4e23-bc30-bd9cf2722903" (UID: "cc9ed833-63d7-4e23-bc30-bd9cf2722903"). InnerVolumeSpecName "kube-api-access-b79qj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.697469 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc9ed833-63d7-4e23-bc30-bd9cf2722903" (UID: "cc9ed833-63d7-4e23-bc30-bd9cf2722903"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.727140 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-config-data" (OuterVolumeSpecName: "config-data") pod "cc9ed833-63d7-4e23-bc30-bd9cf2722903" (UID: "cc9ed833-63d7-4e23-bc30-bd9cf2722903"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.772274 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.772320 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b79qj\" (UniqueName: \"kubernetes.io/projected/cc9ed833-63d7-4e23-bc30-bd9cf2722903-kube-api-access-b79qj\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.772333 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:57 crc kubenswrapper[4925]: I0121 11:23:57.772345 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9ed833-63d7-4e23-bc30-bd9cf2722903-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.175903 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" event={"ID":"cc9ed833-63d7-4e23-bc30-bd9cf2722903","Type":"ContainerDied","Data":"565efa81e9bcb0deb843be8db79be70dc6f8c63ae6716d93c5a24374c436341b"} Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.175961 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.175961 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="565efa81e9bcb0deb843be8db79be70dc6f8c63ae6716d93c5a24374c436341b" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.513274 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:23:58 crc kubenswrapper[4925]: E0121 11:23:58.513841 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc9ed833-63d7-4e23-bc30-bd9cf2722903" containerName="watcher-kuttl-db-sync" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.513863 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc9ed833-63d7-4e23-bc30-bd9cf2722903" containerName="watcher-kuttl-db-sync" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.514093 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc9ed833-63d7-4e23-bc30-bd9cf2722903" containerName="watcher-kuttl-db-sync" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.514912 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.518887 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-s45tj" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.519187 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.522748 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.524500 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.540852 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/031dfff8-0610-4f70-b32f-afe357397f88-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.540921 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.540956 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541127 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541147 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7ktk\" (UniqueName: \"kubernetes.io/projected/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-kube-api-access-s7ktk\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541223 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541442 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541527 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541578 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541725 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npvvg\" (UniqueName: \"kubernetes.io/projected/031dfff8-0610-4f70-b32f-afe357397f88-kube-api-access-npvvg\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541770 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.541829 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.542036 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.542107 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.552964 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.568510 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.635100 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.636608 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.643916 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645198 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645258 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7ktk\" (UniqueName: \"kubernetes.io/projected/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-kube-api-access-s7ktk\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645289 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645343 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbwv5\" (UniqueName: \"kubernetes.io/projected/11efda36-8315-4323-b18e-a035ff88feeb-kube-api-access-dbwv5\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645381 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645431 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645472 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645497 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645549 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-npvvg\" (UniqueName: \"kubernetes.io/projected/031dfff8-0610-4f70-b32f-afe357397f88-kube-api-access-npvvg\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645574 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645605 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645628 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645652 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11efda36-8315-4323-b18e-a035ff88feeb-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645690 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/031dfff8-0610-4f70-b32f-afe357397f88-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645720 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.645745 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.646245 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.651887 4925 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/031dfff8-0610-4f70-b32f-afe357397f88-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.654430 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.660043 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.661386 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.662251 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.664062 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.664820 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.666226 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.666367 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.686237 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7ktk\" (UniqueName: \"kubernetes.io/projected/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-kube-api-access-s7ktk\") pod \"watcher-kuttl-api-0\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.686829 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npvvg\" (UniqueName: \"kubernetes.io/projected/031dfff8-0610-4f70-b32f-afe357397f88-kube-api-access-npvvg\") pod \"watcher-kuttl-applier-0\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.746981 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.747037 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11efda36-8315-4323-b18e-a035ff88feeb-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.747080 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.747128 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbwv5\" (UniqueName: \"kubernetes.io/projected/11efda36-8315-4323-b18e-a035ff88feeb-kube-api-access-dbwv5\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.747164 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.747986 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11efda36-8315-4323-b18e-a035ff88feeb-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.751068 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.755727 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" 
Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.759539 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.770295 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbwv5\" (UniqueName: \"kubernetes.io/projected/11efda36-8315-4323-b18e-a035ff88feeb-kube-api-access-dbwv5\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.839555 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:23:58 crc kubenswrapper[4925]: I0121 11:23:58.855126 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:23:59 crc kubenswrapper[4925]: I0121 11:23:59.275465 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:59 crc kubenswrapper[4925]: I0121 11:23:59.279209 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:59 crc kubenswrapper[4925]: I0121 11:23:59.287472 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:23:59 crc kubenswrapper[4925]: I0121 11:23:59.338853 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:23:59 crc kubenswrapper[4925]: W0121 11:23:59.739558 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod031dfff8_0610_4f70_b32f_afe357397f88.slice/crio-8c28c031f2346b9ae915a828bb3ade619c121671200a412738c3cd3d177f4f83 WatchSource:0}: Error finding container 8c28c031f2346b9ae915a828bb3ade619c121671200a412738c3cd3d177f4f83: Status 404 returned error can't find the container with id 8c28c031f2346b9ae915a828bb3ade619c121671200a412738c3cd3d177f4f83 Jan 21 11:23:59 crc kubenswrapper[4925]: I0121 11:23:59.759971 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:24:00 crc kubenswrapper[4925]: I0121 11:24:00.201213 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:00 crc kubenswrapper[4925]: I0121 11:24:00.255834 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:24:00 crc kubenswrapper[4925]: I0121 11:24:00.338806 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"031dfff8-0610-4f70-b32f-afe357397f88","Type":"ContainerStarted","Data":"8c28c031f2346b9ae915a828bb3ade619c121671200a412738c3cd3d177f4f83"} Jan 21 11:24:00 crc kubenswrapper[4925]: I0121 11:24:00.343747 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" 
event={"ID":"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3","Type":"ContainerStarted","Data":"8c66d27e601cda65e023724118e0eef6b3c9fba1e37de47bfffeb0d7b220d755"} Jan 21 11:24:00 crc kubenswrapper[4925]: I0121 11:24:00.347547 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"11efda36-8315-4323-b18e-a035ff88feeb","Type":"ContainerStarted","Data":"fcde1d66bea57184b967f687f1e2d2e566a350527ecfbed74ce9487558612e5d"} Jan 21 11:24:00 crc kubenswrapper[4925]: I0121 11:24:00.436263 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.369067 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"031dfff8-0610-4f70-b32f-afe357397f88","Type":"ContainerStarted","Data":"18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61"} Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.371256 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3","Type":"ContainerStarted","Data":"3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248"} Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.371301 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3","Type":"ContainerStarted","Data":"3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e"} Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.372197 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.373672 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.152:9322/\": dial tcp 10.217.0.152:9322: connect: connection refused" Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.375681 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"11efda36-8315-4323-b18e-a035ff88feeb","Type":"ContainerStarted","Data":"33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132"} Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.394683 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=3.394657987 podStartE2EDuration="3.394657987s" podCreationTimestamp="2026-01-21 11:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:01.391991152 +0000 UTC m=+1732.995883086" watchObservedRunningTime="2026-01-21 11:24:01.394657987 +0000 UTC m=+1732.998549941" Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.430755 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=3.430723103 podStartE2EDuration="3.430723103s" podCreationTimestamp="2026-01-21 11:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:01.416101439 
+0000 UTC m=+1733.019993373" watchObservedRunningTime="2026-01-21 11:24:01.430723103 +0000 UTC m=+1733.034615037" Jan 21 11:24:01 crc kubenswrapper[4925]: I0121 11:24:01.448951 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=3.448929742 podStartE2EDuration="3.448929742s" podCreationTimestamp="2026-01-21 11:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:01.443473098 +0000 UTC m=+1733.047365032" watchObservedRunningTime="2026-01-21 11:24:01.448929742 +0000 UTC m=+1733.052821676" Jan 21 11:24:02 crc kubenswrapper[4925]: I0121 11:24:02.932990 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpbqt"] Jan 21 11:24:03 crc kubenswrapper[4925]: I0121 11:24:03.393170 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rpbqt" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerName="registry-server" containerID="cri-o://288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f" gracePeriod=2 Jan 21 11:24:03 crc kubenswrapper[4925]: I0121 11:24:03.840138 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:03 crc kubenswrapper[4925]: I0121 11:24:03.855818 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.484362 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.496679 4925 generic.go:334] "Generic (PLEG): container finished" podID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerID="288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f" exitCode=0 Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.496781 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.497689 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpbqt" event={"ID":"376ba5c3-0b92-4c05-9b77-90313d272c54","Type":"ContainerDied","Data":"288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f"} Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.497723 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpbqt" event={"ID":"376ba5c3-0b92-4c05-9b77-90313d272c54","Type":"ContainerDied","Data":"3f2b3fd8f245bf42a13f73e4294aa9aaf3a23b5620046af6c7ae59de1ba2d836"} Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.497770 4925 scope.go:117] "RemoveContainer" containerID="288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.525791 4925 scope.go:117] "RemoveContainer" containerID="bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.570057 4925 scope.go:117] "RemoveContainer" containerID="0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.605711 4925 scope.go:117] "RemoveContainer" containerID="288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f" Jan 21 
11:24:04 crc kubenswrapper[4925]: E0121 11:24:04.607385 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f\": container with ID starting with 288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f not found: ID does not exist" containerID="288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.607516 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f"} err="failed to get container status \"288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f\": rpc error: code = NotFound desc = could not find container \"288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f\": container with ID starting with 288eba7dec5a3471179df7f4f6729562578456bb8d6bdc6be7c8a2000268ff2f not found: ID does not exist" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.607547 4925 scope.go:117] "RemoveContainer" containerID="bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18" Jan 21 11:24:04 crc kubenswrapper[4925]: E0121 11:24:04.608337 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18\": container with ID starting with bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18 not found: ID does not exist" containerID="bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.608376 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18"} err="failed to get container status \"bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18\": rpc error: code = NotFound desc = could not find container \"bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18\": container with ID starting with bf9716b74d7832207ff622820edc567db96c3cc7798715b0685f2544e2070d18 not found: ID does not exist" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.608415 4925 scope.go:117] "RemoveContainer" containerID="0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a" Jan 21 11:24:04 crc kubenswrapper[4925]: E0121 11:24:04.608739 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a\": container with ID starting with 0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a not found: ID does not exist" containerID="0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.608767 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a"} err="failed to get container status \"0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a\": rpc error: code = NotFound desc = could not find container \"0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a\": container with ID starting with 0be18b7526cda69169ac3808abb3c7e700cdacba7c6f94b4267609bb867a0d0a not found: ID does not exist" Jan 21 11:24:04 crc 
kubenswrapper[4925]: I0121 11:24:04.630655 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzvrg\" (UniqueName: \"kubernetes.io/projected/376ba5c3-0b92-4c05-9b77-90313d272c54-kube-api-access-lzvrg\") pod \"376ba5c3-0b92-4c05-9b77-90313d272c54\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.631170 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-utilities\") pod \"376ba5c3-0b92-4c05-9b77-90313d272c54\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.631326 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-catalog-content\") pod \"376ba5c3-0b92-4c05-9b77-90313d272c54\" (UID: \"376ba5c3-0b92-4c05-9b77-90313d272c54\") " Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.631570 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-utilities" (OuterVolumeSpecName: "utilities") pod "376ba5c3-0b92-4c05-9b77-90313d272c54" (UID: "376ba5c3-0b92-4c05-9b77-90313d272c54"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.632083 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.640610 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/376ba5c3-0b92-4c05-9b77-90313d272c54-kube-api-access-lzvrg" (OuterVolumeSpecName: "kube-api-access-lzvrg") pod "376ba5c3-0b92-4c05-9b77-90313d272c54" (UID: "376ba5c3-0b92-4c05-9b77-90313d272c54"). InnerVolumeSpecName "kube-api-access-lzvrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.655214 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "376ba5c3-0b92-4c05-9b77-90313d272c54" (UID: "376ba5c3-0b92-4c05-9b77-90313d272c54"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.734526 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376ba5c3-0b92-4c05-9b77-90313d272c54-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:04 crc kubenswrapper[4925]: I0121 11:24:04.734581 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzvrg\" (UniqueName: \"kubernetes.io/projected/376ba5c3-0b92-4c05-9b77-90313d272c54-kube-api-access-lzvrg\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:05 crc kubenswrapper[4925]: I0121 11:24:05.507070 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rpbqt" Jan 21 11:24:05 crc kubenswrapper[4925]: I0121 11:24:05.556080 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpbqt"] Jan 21 11:24:05 crc kubenswrapper[4925]: I0121 11:24:05.566900 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpbqt"] Jan 21 11:24:05 crc kubenswrapper[4925]: I0121 11:24:05.707153 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:07 crc kubenswrapper[4925]: I0121 11:24:07.470790 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:07 crc kubenswrapper[4925]: I0121 11:24:07.514994 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" path="/var/lib/kubelet/pods/376ba5c3-0b92-4c05-9b77-90313d272c54/volumes" Jan 21 11:24:08 crc kubenswrapper[4925]: I0121 11:24:08.842636 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:08 crc kubenswrapper[4925]: I0121 11:24:08.858131 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:08 crc kubenswrapper[4925]: I0121 11:24:08.873081 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:08 crc kubenswrapper[4925]: I0121 11:24:08.905488 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:09 crc kubenswrapper[4925]: I0121 11:24:09.276536 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:09 crc kubenswrapper[4925]: I0121 11:24:09.304723 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:09 crc kubenswrapper[4925]: I0121 11:24:09.550887 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:09 crc kubenswrapper[4925]: I0121 11:24:09.558753 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:09 crc kubenswrapper[4925]: I0121 11:24:09.586046 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:09 crc kubenswrapper[4925]: I0121 11:24:09.592883 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.015700 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.016707 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="ceilometer-central-agent" containerID="cri-o://4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4" gracePeriod=30 Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.016763 4925 
kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="sg-core" containerID="cri-o://09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de" gracePeriod=30 Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.016827 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="ceilometer-notification-agent" containerID="cri-o://334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648" gracePeriod=30 Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.017051 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="proxy-httpd" containerID="cri-o://5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4" gracePeriod=30 Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.659140 4925 generic.go:334] "Generic (PLEG): container finished" podID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerID="5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4" exitCode=0 Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.659193 4925 generic.go:334] "Generic (PLEG): container finished" podID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerID="09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de" exitCode=2 Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.659204 4925 generic.go:334] "Generic (PLEG): container finished" podID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerID="4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4" exitCode=0 Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.659230 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerDied","Data":"5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4"} Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.659267 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerDied","Data":"09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de"} Jan 21 11:24:12 crc kubenswrapper[4925]: I0121 11:24:12.659284 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerDied","Data":"4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4"} Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.104271 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.225835 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-scripts\") pod \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.225905 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-combined-ca-bundle\") pod \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.225955 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-config-data\") pod \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.225987 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45b5n\" (UniqueName: \"kubernetes.io/projected/f348095e-bcd6-41fd-9ef7-f1836535f7e3-kube-api-access-45b5n\") pod \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.226053 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-log-httpd\") pod \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.226099 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-sg-core-conf-yaml\") pod \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.226162 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-ceilometer-tls-certs\") pod \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.226318 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-run-httpd\") pod \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\" (UID: \"f348095e-bcd6-41fd-9ef7-f1836535f7e3\") " Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.226797 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f348095e-bcd6-41fd-9ef7-f1836535f7e3" (UID: "f348095e-bcd6-41fd-9ef7-f1836535f7e3"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.226824 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f348095e-bcd6-41fd-9ef7-f1836535f7e3" (UID: "f348095e-bcd6-41fd-9ef7-f1836535f7e3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.244079 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-scripts" (OuterVolumeSpecName: "scripts") pod "f348095e-bcd6-41fd-9ef7-f1836535f7e3" (UID: "f348095e-bcd6-41fd-9ef7-f1836535f7e3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.244927 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f348095e-bcd6-41fd-9ef7-f1836535f7e3-kube-api-access-45b5n" (OuterVolumeSpecName: "kube-api-access-45b5n") pod "f348095e-bcd6-41fd-9ef7-f1836535f7e3" (UID: "f348095e-bcd6-41fd-9ef7-f1836535f7e3"). InnerVolumeSpecName "kube-api-access-45b5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.258590 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f348095e-bcd6-41fd-9ef7-f1836535f7e3" (UID: "f348095e-bcd6-41fd-9ef7-f1836535f7e3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.276308 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f348095e-bcd6-41fd-9ef7-f1836535f7e3" (UID: "f348095e-bcd6-41fd-9ef7-f1836535f7e3"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.538094 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.538141 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.538167 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.538182 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45b5n\" (UniqueName: \"kubernetes.io/projected/f348095e-bcd6-41fd-9ef7-f1836535f7e3-kube-api-access-45b5n\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.538196 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f348095e-bcd6-41fd-9ef7-f1836535f7e3-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.538210 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.559547 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f348095e-bcd6-41fd-9ef7-f1836535f7e3" (UID: "f348095e-bcd6-41fd-9ef7-f1836535f7e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.563811 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-config-data" (OuterVolumeSpecName: "config-data") pod "f348095e-bcd6-41fd-9ef7-f1836535f7e3" (UID: "f348095e-bcd6-41fd-9ef7-f1836535f7e3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.640588 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.640623 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f348095e-bcd6-41fd-9ef7-f1836535f7e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.673282 4925 generic.go:334] "Generic (PLEG): container finished" podID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerID="334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648" exitCode=0 Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.673342 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerDied","Data":"334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648"} Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.673381 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f348095e-bcd6-41fd-9ef7-f1836535f7e3","Type":"ContainerDied","Data":"1f8283fdd8414ed4829d99f0c2f2f76cf7cf42e275e2158e6c0090737309b3bd"} Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.673449 4925 scope.go:117] "RemoveContainer" containerID="5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.673648 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.721573 4925 scope.go:117] "RemoveContainer" containerID="09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.726840 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.747881 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756142 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.756574 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="sg-core" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756596 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="sg-core" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.756606 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerName="extract-utilities" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756619 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerName="extract-utilities" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.756636 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerName="registry-server" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756643 4925 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerName="registry-server" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.756654 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="ceilometer-central-agent" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756660 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="ceilometer-central-agent" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.756668 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="ceilometer-notification-agent" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756674 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="ceilometer-notification-agent" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.756696 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="proxy-httpd" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756702 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="proxy-httpd" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.756715 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerName="extract-content" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756725 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerName="extract-content" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756942 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="ceilometer-central-agent" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756963 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="proxy-httpd" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756979 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="sg-core" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756992 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="376ba5c3-0b92-4c05-9b77-90313d272c54" containerName="registry-server" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.757000 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" containerName="ceilometer-notification-agent" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.756708 4925 scope.go:117] "RemoveContainer" containerID="334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.758833 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.764015 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.764240 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.764474 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.787957 4925 scope.go:117] "RemoveContainer" containerID="4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.796500 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.820388 4925 scope.go:117] "RemoveContainer" containerID="5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.820892 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4\": container with ID starting with 5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4 not found: ID does not exist" containerID="5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.820976 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4"} err="failed to get container status \"5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4\": rpc error: code = NotFound desc = could not find container \"5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4\": container with ID starting with 5138d3627b79b10fcf956524077f3ccc078df1479a94c63d625e8ff43fec64e4 not found: ID does not exist" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.821015 4925 scope.go:117] "RemoveContainer" containerID="09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.823948 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de\": container with ID starting with 09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de not found: ID does not exist" containerID="09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.824013 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de"} err="failed to get container status \"09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de\": rpc error: code = NotFound desc = could not find container \"09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de\": container with ID starting with 09601e9ba401e6b1330876a1352775649c95da09e465ab7f6399ebf57de2c3de not found: ID does not exist" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.824046 4925 scope.go:117] "RemoveContainer" 
containerID="334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.829645 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648\": container with ID starting with 334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648 not found: ID does not exist" containerID="334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.829711 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648"} err="failed to get container status \"334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648\": rpc error: code = NotFound desc = could not find container \"334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648\": container with ID starting with 334a1e18ac8d6e13818ac96abbe3771e70c473fc46cb8db785ddaf9466685648 not found: ID does not exist" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.829747 4925 scope.go:117] "RemoveContainer" containerID="4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4" Jan 21 11:24:13 crc kubenswrapper[4925]: E0121 11:24:13.830927 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4\": container with ID starting with 4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4 not found: ID does not exist" containerID="4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.830984 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4"} err="failed to get container status \"4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4\": rpc error: code = NotFound desc = could not find container \"4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4\": container with ID starting with 4a8f1d177dd3194303cbd6178433575e71bb5e2997073517657b87cb379dc2c4 not found: ID does not exist" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.847377 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-run-httpd\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.847648 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-config-data\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.847832 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mhfs\" (UniqueName: \"kubernetes.io/projected/ad5ac110-8390-43fe-9250-2c304e1d6490-kube-api-access-7mhfs\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc 
kubenswrapper[4925]: I0121 11:24:13.848121 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-scripts\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.848209 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.848341 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.848455 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.848679 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-log-httpd\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.949953 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-scripts\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.950049 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.950096 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.950118 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.950163 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-log-httpd\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.950217 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-run-httpd\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.950240 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-config-data\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.950275 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mhfs\" (UniqueName: \"kubernetes.io/projected/ad5ac110-8390-43fe-9250-2c304e1d6490-kube-api-access-7mhfs\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.951021 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-run-httpd\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.953676 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-log-httpd\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.955505 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.956463 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-config-data\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.956509 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.957072 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-scripts\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.967287 4925 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:13 crc kubenswrapper[4925]: I0121 11:24:13.973572 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mhfs\" (UniqueName: \"kubernetes.io/projected/ad5ac110-8390-43fe-9250-2c304e1d6490-kube-api-access-7mhfs\") pod \"ceilometer-0\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:14 crc kubenswrapper[4925]: I0121 11:24:14.091072 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:14 crc kubenswrapper[4925]: I0121 11:24:14.646518 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:24:14 crc kubenswrapper[4925]: W0121 11:24:14.657870 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad5ac110_8390_43fe_9250_2c304e1d6490.slice/crio-cb160d0db15e3b8f60a15a6cd182c5f20672b7294cd9ea7d645730919f32c5fd WatchSource:0}: Error finding container cb160d0db15e3b8f60a15a6cd182c5f20672b7294cd9ea7d645730919f32c5fd: Status 404 returned error can't find the container with id cb160d0db15e3b8f60a15a6cd182c5f20672b7294cd9ea7d645730919f32c5fd Jan 21 11:24:14 crc kubenswrapper[4925]: I0121 11:24:14.682644 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerStarted","Data":"cb160d0db15e3b8f60a15a6cd182c5f20672b7294cd9ea7d645730919f32c5fd"} Jan 21 11:24:15 crc kubenswrapper[4925]: I0121 11:24:15.514540 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f348095e-bcd6-41fd-9ef7-f1836535f7e3" path="/var/lib/kubelet/pods/f348095e-bcd6-41fd-9ef7-f1836535f7e3/volumes" Jan 21 11:24:15 crc kubenswrapper[4925]: I0121 11:24:15.695255 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerStarted","Data":"c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83"} Jan 21 11:24:17 crc kubenswrapper[4925]: I0121 11:24:17.714588 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerStarted","Data":"b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571"} Jan 21 11:24:17 crc kubenswrapper[4925]: I0121 11:24:17.714937 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerStarted","Data":"92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64"} Jan 21 11:24:18 crc kubenswrapper[4925]: I0121 11:24:18.726380 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerStarted","Data":"8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc"} Jan 21 11:24:18 crc kubenswrapper[4925]: I0121 11:24:18.726769 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:18 crc kubenswrapper[4925]: I0121 11:24:18.763808 4925 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.004316233 podStartE2EDuration="5.76378289s" podCreationTimestamp="2026-01-21 11:24:13 +0000 UTC" firstStartedPulling="2026-01-21 11:24:14.663693544 +0000 UTC m=+1746.267585478" lastFinishedPulling="2026-01-21 11:24:18.423160201 +0000 UTC m=+1750.027052135" observedRunningTime="2026-01-21 11:24:18.762265023 +0000 UTC m=+1750.366156957" watchObservedRunningTime="2026-01-21 11:24:18.76378289 +0000 UTC m=+1750.367674824" Jan 21 11:24:19 crc kubenswrapper[4925]: I0121 11:24:19.941676 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:24:19 crc kubenswrapper[4925]: I0121 11:24:19.941857 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:24:19 crc kubenswrapper[4925]: I0121 11:24:19.942198 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:24:19 crc kubenswrapper[4925]: I0121 11:24:19.943162 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:24:19 crc kubenswrapper[4925]: I0121 11:24:19.943255 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" gracePeriod=600 Jan 21 11:24:20 crc kubenswrapper[4925]: E0121 11:24:20.731548 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:24:20 crc kubenswrapper[4925]: I0121 11:24:20.765948 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" exitCode=0 Jan 21 11:24:20 crc kubenswrapper[4925]: I0121 11:24:20.765999 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73"} Jan 21 11:24:20 crc kubenswrapper[4925]: I0121 11:24:20.766043 4925 scope.go:117] "RemoveContainer" 
containerID="6fb1cacdd241e7a8efac0b528deff5f04d57c5b631c8479e71c5d41a4ae7e250" Jan 21 11:24:20 crc kubenswrapper[4925]: I0121 11:24:20.767038 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:24:20 crc kubenswrapper[4925]: E0121 11:24:20.767346 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.238449 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/memcached-0"] Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.240011 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/memcached-0" podUID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" containerName="memcached" containerID="cri-o://b60e710e30fa24623f902db18ad77e3fe8eb6c4f0f2074eb478050cc60a62331" gracePeriod=30 Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.273060 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.273339 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="031dfff8-0610-4f70-b32f-afe357397f88" containerName="watcher-applier" containerID="cri-o://18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61" gracePeriod=30 Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.287732 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.288016 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="11efda36-8315-4323-b18e-a035ff88feeb" containerName="watcher-decision-engine" containerID="cri-o://33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132" gracePeriod=30 Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.304286 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.304590 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-kuttl-api-log" containerID="cri-o://3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e" gracePeriod=30 Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.304770 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-api" containerID="cri-o://3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248" gracePeriod=30 Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.477084 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-mvrqb"] Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.487626 4925 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["watcher-kuttl-default/keystone-bootstrap-mvrqb"] Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.539034 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-79xx6"] Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.540758 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.543763 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"osp-secret" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.545516 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-memcached-mtls" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.563537 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-79xx6"] Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.783238 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-scripts\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.783381 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-config-data\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.783525 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-combined-ca-bundle\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.783567 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7mf9\" (UniqueName: \"kubernetes.io/projected/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-kube-api-access-g7mf9\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.783601 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-credential-keys\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.783654 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-fernet-keys\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.783689 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-cert-memcached-mtls\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.866834 4925 generic.go:334] "Generic (PLEG): container finished" podID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerID="3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e" exitCode=143 Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.866896 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3","Type":"ContainerDied","Data":"3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e"} Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.885280 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-combined-ca-bundle\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.885371 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7mf9\" (UniqueName: \"kubernetes.io/projected/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-kube-api-access-g7mf9\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.885416 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-credential-keys\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.885451 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-fernet-keys\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.885477 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-cert-memcached-mtls\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.885508 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-scripts\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.885624 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-config-data\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " 
pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.894288 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-fernet-keys\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.894790 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-scripts\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.895180 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-credential-keys\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.905458 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-combined-ca-bundle\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.909830 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-config-data\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.921177 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7mf9\" (UniqueName: \"kubernetes.io/projected/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-kube-api-access-g7mf9\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:30 crc kubenswrapper[4925]: I0121 11:24:30.938144 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-cert-memcached-mtls\") pod \"keystone-bootstrap-79xx6\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:31 crc kubenswrapper[4925]: I0121 11:24:31.174556 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:31 crc kubenswrapper[4925]: I0121 11:24:31.520096 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61131ba1-5e24-4728-bd8d-adb5a0c63136" path="/var/lib/kubelet/pods/61131ba1-5e24-4728-bd8d-adb5a0c63136/volumes" Jan 21 11:24:31 crc kubenswrapper[4925]: I0121 11:24:31.898645 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-79xx6"] Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.501889 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:24:32 crc kubenswrapper[4925]: E0121 11:24:32.502522 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.571074 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.726255 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7ktk\" (UniqueName: \"kubernetes.io/projected/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-kube-api-access-s7ktk\") pod \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.726343 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-combined-ca-bundle\") pod \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.726386 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-internal-tls-certs\") pod \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.726470 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-custom-prometheus-ca\") pod \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.726512 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-logs\") pod \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.726633 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-config-data\") pod \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 
11:24:32.726708 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-public-tls-certs\") pod \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\" (UID: \"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3\") " Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.727925 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-logs" (OuterVolumeSpecName: "logs") pod "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" (UID: "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.731889 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-kube-api-access-s7ktk" (OuterVolumeSpecName: "kube-api-access-s7ktk") pod "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" (UID: "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3"). InnerVolumeSpecName "kube-api-access-s7ktk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.819466 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" (UID: "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.828630 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7ktk\" (UniqueName: \"kubernetes.io/projected/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-kube-api-access-s7ktk\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.828679 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.828693 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.830748 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" (UID: "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.855569 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" (UID: "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.858913 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" (UID: "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.891542 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-config-data" (OuterVolumeSpecName: "config-data") pod "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" (UID: "b6d1bc33-8e30-4eed-9f19-deadbdcd66e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.892361 4925 generic.go:334] "Generic (PLEG): container finished" podID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerID="3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248" exitCode=0 Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.892495 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3","Type":"ContainerDied","Data":"3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248"} Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.892547 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b6d1bc33-8e30-4eed-9f19-deadbdcd66e3","Type":"ContainerDied","Data":"8c66d27e601cda65e023724118e0eef6b3c9fba1e37de47bfffeb0d7b220d755"} Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.892567 4925 scope.go:117] "RemoveContainer" containerID="3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.892772 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.904418 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" event={"ID":"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531","Type":"ContainerStarted","Data":"e6199904b341b7b85a7f106273a26478355bf67ec882e68c0bcc848e0117a6c8"} Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.904478 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" event={"ID":"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531","Type":"ContainerStarted","Data":"7ff96ce299c1c08e3997ba4b258a32c5c31cc6f2b02847af864b4589938d2cd8"} Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.908595 4925 generic.go:334] "Generic (PLEG): container finished" podID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" containerID="b60e710e30fa24623f902db18ad77e3fe8eb6c4f0f2074eb478050cc60a62331" exitCode=0 Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.908634 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"fcfc5c97-6b6f-41b2-8c2b-265e178b2645","Type":"ContainerDied","Data":"b60e710e30fa24623f902db18ad77e3fe8eb6c4f0f2074eb478050cc60a62331"} Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.931979 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.932097 4925 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.932115 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.932126 4925 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:32 crc kubenswrapper[4925]: I0121 11:24:32.945077 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" podStartSLOduration=2.9446513640000003 podStartE2EDuration="2.944651364s" podCreationTimestamp="2026-01-21 11:24:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:32.929418733 +0000 UTC m=+1764.533310667" watchObservedRunningTime="2026-01-21 11:24:32.944651364 +0000 UTC m=+1764.548543298" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.031991 4925 scope.go:117] "RemoveContainer" containerID="3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.039256 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.050972 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.062789 4925 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.063085 4925 scope.go:117] "RemoveContainer" containerID="3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248" Jan 21 11:24:33 crc kubenswrapper[4925]: E0121 11:24:33.063509 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-api" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.063549 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-api" Jan 21 11:24:33 crc kubenswrapper[4925]: E0121 11:24:33.063588 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-kuttl-api-log" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.063597 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-kuttl-api-log" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.063800 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-api" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.063825 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" containerName="watcher-kuttl-api-log" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.064959 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: E0121 11:24:33.066292 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248\": container with ID starting with 3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248 not found: ID does not exist" containerID="3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.066345 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248"} err="failed to get container status \"3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248\": rpc error: code = NotFound desc = could not find container \"3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248\": container with ID starting with 3f82fa36d19de1573138f350fd78ac477a6f75c56bfcf62d4da0112563d87248 not found: ID does not exist" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.066387 4925 scope.go:117] "RemoveContainer" containerID="3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e" Jan 21 11:24:33 crc kubenswrapper[4925]: E0121 11:24:33.067056 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e\": container with ID starting with 3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e not found: ID does not exist" containerID="3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.067091 4925 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e"} err="failed to get container status \"3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e\": rpc error: code = NotFound desc = could not find container \"3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e\": container with ID starting with 3651815ae5f0b106ef1d256a5eff35c2e26576969f3f564eff656abcb281c42e not found: ID does not exist" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.067379 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-public-svc" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.067788 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.067986 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-watcher-internal-svc" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.073208 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.135102 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.135194 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.135324 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58140070-f6f6-4498-88ce-78968081eaa0-logs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.135354 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.135377 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.135501 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.135561 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.135608 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gb8z5\" (UniqueName: \"kubernetes.io/projected/58140070-f6f6-4498-88ce-78968081eaa0-kube-api-access-gb8z5\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.170220 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.277096 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58140070-f6f6-4498-88ce-78968081eaa0-logs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.277165 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.277189 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.277244 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.277743 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.278152 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gb8z5\" (UniqueName: \"kubernetes.io/projected/58140070-f6f6-4498-88ce-78968081eaa0-kube-api-access-gb8z5\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.278176 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/58140070-f6f6-4498-88ce-78968081eaa0-logs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.278212 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.278430 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.282812 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.282881 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.285091 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.286781 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-internal-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.288002 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-public-tls-certs\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.292712 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.309836 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gb8z5\" (UniqueName: \"kubernetes.io/projected/58140070-f6f6-4498-88ce-78968081eaa0-kube-api-access-gb8z5\") pod \"watcher-kuttl-api-0\" (UID: 
\"58140070-f6f6-4498-88ce-78968081eaa0\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.379908 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-config-data\") pod \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.380084 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-memcached-tls-certs\") pod \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.380186 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6zmt\" (UniqueName: \"kubernetes.io/projected/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kube-api-access-h6zmt\") pod \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.380228 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-combined-ca-bundle\") pod \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.380348 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kolla-config\") pod \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\" (UID: \"fcfc5c97-6b6f-41b2-8c2b-265e178b2645\") " Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.380881 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-config-data" (OuterVolumeSpecName: "config-data") pod "fcfc5c97-6b6f-41b2-8c2b-265e178b2645" (UID: "fcfc5c97-6b6f-41b2-8c2b-265e178b2645"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.380866 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "fcfc5c97-6b6f-41b2-8c2b-265e178b2645" (UID: "fcfc5c97-6b6f-41b2-8c2b-265e178b2645"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.381303 4925 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.381327 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.386477 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kube-api-access-h6zmt" (OuterVolumeSpecName: "kube-api-access-h6zmt") pod "fcfc5c97-6b6f-41b2-8c2b-265e178b2645" (UID: "fcfc5c97-6b6f-41b2-8c2b-265e178b2645"). InnerVolumeSpecName "kube-api-access-h6zmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.416153 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fcfc5c97-6b6f-41b2-8c2b-265e178b2645" (UID: "fcfc5c97-6b6f-41b2-8c2b-265e178b2645"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.430276 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "fcfc5c97-6b6f-41b2-8c2b-265e178b2645" (UID: "fcfc5c97-6b6f-41b2-8c2b-265e178b2645"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.464987 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.522676 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6zmt\" (UniqueName: \"kubernetes.io/projected/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-kube-api-access-h6zmt\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.522732 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.522747 4925 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfc5c97-6b6f-41b2-8c2b-265e178b2645-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.538031 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6d1bc33-8e30-4eed-9f19-deadbdcd66e3" path="/var/lib/kubelet/pods/b6d1bc33-8e30-4eed-9f19-deadbdcd66e3/volumes" Jan 21 11:24:33 crc kubenswrapper[4925]: E0121 11:24:33.842337 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61 is running failed: container process not found" containerID="18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:24:33 crc kubenswrapper[4925]: E0121 11:24:33.842839 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61 is running failed: container process not found" containerID="18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:24:33 crc kubenswrapper[4925]: E0121 11:24:33.843190 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61 is running failed: container process not found" containerID="18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:24:33 crc kubenswrapper[4925]: E0121 11:24:33.843230 4925 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61 is running failed: container process not found" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="031dfff8-0610-4f70-b32f-afe357397f88" containerName="watcher-applier" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.938846 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"fcfc5c97-6b6f-41b2-8c2b-265e178b2645","Type":"ContainerDied","Data":"37844f5d7f27bf4321d9749cc8db68f970d3f95cdc48164e8f42e33f67eb9471"} Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.938887 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.938919 4925 scope.go:117] "RemoveContainer" containerID="b60e710e30fa24623f902db18ad77e3fe8eb6c4f0f2074eb478050cc60a62331" Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.940687 4925 generic.go:334] "Generic (PLEG): container finished" podID="031dfff8-0610-4f70-b32f-afe357397f88" containerID="18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61" exitCode=0 Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.940764 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"031dfff8-0610-4f70-b32f-afe357397f88","Type":"ContainerDied","Data":"18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61"} Jan 21 11:24:33 crc kubenswrapper[4925]: I0121 11:24:33.990958 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/memcached-0"] Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.000742 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/memcached-0"] Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.027521 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/memcached-0"] Jan 21 11:24:34 crc kubenswrapper[4925]: E0121 11:24:34.028114 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" containerName="memcached" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.028139 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" containerName="memcached" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.028306 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" containerName="memcached" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.029079 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.033892 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-memcached-svc" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.034077 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"watcher-kuttl-default"/"memcached-config-data" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.039526 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"memcached-memcached-dockercfg-9c69d" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.047361 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/memcached-0"] Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.099062 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.133644 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-memcached-tls-certs\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.133715 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-kolla-config\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.134552 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-config-data\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.134599 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.134662 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t2dv\" (UniqueName: \"kubernetes.io/projected/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-kube-api-access-7t2dv\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.285409 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t2dv\" (UniqueName: \"kubernetes.io/projected/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-kube-api-access-7t2dv\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.285523 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-memcached-tls-certs\") pod \"memcached-0\" 
(UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.285569 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-kolla-config\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.285622 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-config-data\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.285644 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.287628 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-kolla-config\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.288334 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-config-data\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.295907 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-memcached-tls-certs\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.310182 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t2dv\" (UniqueName: \"kubernetes.io/projected/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-kube-api-access-7t2dv\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.332301 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed\") " pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.352441 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.699000 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.795303 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-config-data\") pod \"031dfff8-0610-4f70-b32f-afe357397f88\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.795486 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-combined-ca-bundle\") pod \"031dfff8-0610-4f70-b32f-afe357397f88\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.795557 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/031dfff8-0610-4f70-b32f-afe357397f88-logs\") pod \"031dfff8-0610-4f70-b32f-afe357397f88\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.795805 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npvvg\" (UniqueName: \"kubernetes.io/projected/031dfff8-0610-4f70-b32f-afe357397f88-kube-api-access-npvvg\") pod \"031dfff8-0610-4f70-b32f-afe357397f88\" (UID: \"031dfff8-0610-4f70-b32f-afe357397f88\") " Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.796635 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/031dfff8-0610-4f70-b32f-afe357397f88-logs" (OuterVolumeSpecName: "logs") pod "031dfff8-0610-4f70-b32f-afe357397f88" (UID: "031dfff8-0610-4f70-b32f-afe357397f88"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.803144 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/031dfff8-0610-4f70-b32f-afe357397f88-kube-api-access-npvvg" (OuterVolumeSpecName: "kube-api-access-npvvg") pod "031dfff8-0610-4f70-b32f-afe357397f88" (UID: "031dfff8-0610-4f70-b32f-afe357397f88"). InnerVolumeSpecName "kube-api-access-npvvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.822290 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "031dfff8-0610-4f70-b32f-afe357397f88" (UID: "031dfff8-0610-4f70-b32f-afe357397f88"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.845375 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-config-data" (OuterVolumeSpecName: "config-data") pod "031dfff8-0610-4f70-b32f-afe357397f88" (UID: "031dfff8-0610-4f70-b32f-afe357397f88"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.900632 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.900681 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/031dfff8-0610-4f70-b32f-afe357397f88-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.900696 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npvvg\" (UniqueName: \"kubernetes.io/projected/031dfff8-0610-4f70-b32f-afe357397f88-kube-api-access-npvvg\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.900712 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/031dfff8-0610-4f70-b32f-afe357397f88-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.925649 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/memcached-0"] Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.953224 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.953181 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"031dfff8-0610-4f70-b32f-afe357397f88","Type":"ContainerDied","Data":"8c28c031f2346b9ae915a828bb3ade619c121671200a412738c3cd3d177f4f83"} Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.953358 4925 scope.go:117] "RemoveContainer" containerID="18cfba7b0ee62cd689a9ad3f629b0cad0826eacaa87dd5ea8fa38397904aab61" Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.956543 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed","Type":"ContainerStarted","Data":"f62eb3d2cbab3073f71c47bc497ff4e1421f3579a906720410f33306f45ac8e5"} Jan 21 11:24:34 crc kubenswrapper[4925]: I0121 11:24:34.957886 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"58140070-f6f6-4498-88ce-78968081eaa0","Type":"ContainerStarted","Data":"0d6285c180faf2bbcaf951a940f48763016709641c5a9909f205447cc565ca03"} Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.094540 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.103513 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.194409 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:24:35 crc kubenswrapper[4925]: E0121 11:24:35.194939 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="031dfff8-0610-4f70-b32f-afe357397f88" containerName="watcher-applier" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.194967 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="031dfff8-0610-4f70-b32f-afe357397f88" containerName="watcher-applier" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 
11:24:35.195222 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="031dfff8-0610-4f70-b32f-afe357397f88" containerName="watcher-applier" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.196032 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.201814 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.241039 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.310957 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008045d1-ce08-49f5-b980-d62f4d3e96ba-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.311107 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.311135 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.311152 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.311170 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ldqz\" (UniqueName: \"kubernetes.io/projected/008045d1-ce08-49f5-b980-d62f4d3e96ba-kube-api-access-2ldqz\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.412770 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.412826 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc 
kubenswrapper[4925]: I0121 11:24:35.412848 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ldqz\" (UniqueName: \"kubernetes.io/projected/008045d1-ce08-49f5-b980-d62f4d3e96ba-kube-api-access-2ldqz\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.412895 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008045d1-ce08-49f5-b980-d62f4d3e96ba-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.413016 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.413713 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008045d1-ce08-49f5-b980-d62f4d3e96ba-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.419183 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.421224 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.434663 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.435503 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ldqz\" (UniqueName: \"kubernetes.io/projected/008045d1-ce08-49f5-b980-d62f4d3e96ba-kube-api-access-2ldqz\") pod \"watcher-kuttl-applier-0\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.513875 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="031dfff8-0610-4f70-b32f-afe357397f88" path="/var/lib/kubelet/pods/031dfff8-0610-4f70-b32f-afe357397f88/volumes" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.514806 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" 
path="/var/lib/kubelet/pods/fcfc5c97-6b6f-41b2-8c2b-265e178b2645/volumes" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.527905 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.973105 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/memcached-0" event={"ID":"1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed","Type":"ContainerStarted","Data":"6e7d1a98228eaf793e0415ea39239d8317b60e60247a64c9d790b21c1a67fa21"} Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.973514 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:35 crc kubenswrapper[4925]: I0121 11:24:35.975123 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"58140070-f6f6-4498-88ce-78968081eaa0","Type":"ContainerStarted","Data":"ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812"} Jan 21 11:24:36 crc kubenswrapper[4925]: I0121 11:24:35.999834 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/memcached-0" podStartSLOduration=2.999786778 podStartE2EDuration="2.999786778s" podCreationTimestamp="2026-01-21 11:24:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:35.998115095 +0000 UTC m=+1767.602007039" watchObservedRunningTime="2026-01-21 11:24:35.999786778 +0000 UTC m=+1767.603678712" Jan 21 11:24:36 crc kubenswrapper[4925]: I0121 11:24:36.022149 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:24:36 crc kubenswrapper[4925]: I0121 11:24:36.985241 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"008045d1-ce08-49f5-b980-d62f4d3e96ba","Type":"ContainerStarted","Data":"32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f"} Jan 21 11:24:36 crc kubenswrapper[4925]: I0121 11:24:36.985670 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"008045d1-ce08-49f5-b980-d62f4d3e96ba","Type":"ContainerStarted","Data":"505767ddbb4ec0c6a76b25fe84c65663329f584a2af9004af660a17d6cb17215"} Jan 21 11:24:36 crc kubenswrapper[4925]: I0121 11:24:36.987373 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"58140070-f6f6-4498-88ce-78968081eaa0","Type":"ContainerStarted","Data":"c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1"} Jan 21 11:24:37 crc kubenswrapper[4925]: I0121 11:24:37.005026 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.005006331 podStartE2EDuration="2.005006331s" podCreationTimestamp="2026-01-21 11:24:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:37.002479481 +0000 UTC m=+1768.606371435" watchObservedRunningTime="2026-01-21 11:24:37.005006331 +0000 UTC m=+1768.608898265" Jan 21 11:24:37 crc kubenswrapper[4925]: I0121 11:24:37.038633 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" 
podStartSLOduration=5.038606051 podStartE2EDuration="5.038606051s" podCreationTimestamp="2026-01-21 11:24:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:37.036236666 +0000 UTC m=+1768.640128600" watchObservedRunningTime="2026-01-21 11:24:37.038606051 +0000 UTC m=+1768.642497985" Jan 21 11:24:37 crc kubenswrapper[4925]: I0121 11:24:37.999174 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:38 crc kubenswrapper[4925]: I0121 11:24:38.147076 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/memcached-0" podUID="fcfc5c97-6b6f-41b2-8c2b-265e178b2645" containerName="memcached" probeResult="failure" output="dial tcp 10.217.0.108:11211: i/o timeout" Jan 21 11:24:38 crc kubenswrapper[4925]: I0121 11:24:38.523849 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:39 crc kubenswrapper[4925]: I0121 11:24:39.013591 4925 generic.go:334] "Generic (PLEG): container finished" podID="dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" containerID="e6199904b341b7b85a7f106273a26478355bf67ec882e68c0bcc848e0117a6c8" exitCode=0 Jan 21 11:24:39 crc kubenswrapper[4925]: I0121 11:24:39.013632 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" event={"ID":"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531","Type":"ContainerDied","Data":"e6199904b341b7b85a7f106273a26478355bf67ec882e68c0bcc848e0117a6c8"} Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.024047 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.529023 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.737008 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.779184 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-scripts\") pod \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.779424 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-config-data\") pod \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.779504 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-cert-memcached-mtls\") pod \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.779543 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-fernet-keys\") pod \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.779600 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7mf9\" (UniqueName: \"kubernetes.io/projected/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-kube-api-access-g7mf9\") pod \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.779626 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-credential-keys\") pod \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.779708 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-combined-ca-bundle\") pod \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\" (UID: \"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531\") " Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.788814 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" (UID: "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.789380 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" (UID: "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.806292 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-kube-api-access-g7mf9" (OuterVolumeSpecName: "kube-api-access-g7mf9") pod "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" (UID: "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531"). InnerVolumeSpecName "kube-api-access-g7mf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.818748 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" (UID: "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.820446 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-config-data" (OuterVolumeSpecName: "config-data") pod "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" (UID: "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.820509 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-scripts" (OuterVolumeSpecName: "scripts") pod "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" (UID: "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.933680 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.933732 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.933746 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.933757 4925 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.933768 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7mf9\" (UniqueName: \"kubernetes.io/projected/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-kube-api-access-g7mf9\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.933782 4925 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:40 crc kubenswrapper[4925]: I0121 11:24:40.991667 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" (UID: "dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.035170 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" event={"ID":"dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531","Type":"ContainerDied","Data":"7ff96ce299c1c08e3997ba4b258a32c5c31cc6f2b02847af864b4589938d2cd8"} Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.035217 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ff96ce299c1c08e3997ba4b258a32c5c31cc6f2b02847af864b4589938d2cd8" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.035298 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-bootstrap-79xx6" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.035439 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.214478 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.645937 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.674782 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11efda36-8315-4323-b18e-a035ff88feeb-logs\") pod \"11efda36-8315-4323-b18e-a035ff88feeb\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.674830 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbwv5\" (UniqueName: \"kubernetes.io/projected/11efda36-8315-4323-b18e-a035ff88feeb-kube-api-access-dbwv5\") pod \"11efda36-8315-4323-b18e-a035ff88feeb\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.674879 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-combined-ca-bundle\") pod \"11efda36-8315-4323-b18e-a035ff88feeb\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.674898 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-custom-prometheus-ca\") pod \"11efda36-8315-4323-b18e-a035ff88feeb\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.674923 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-config-data\") pod \"11efda36-8315-4323-b18e-a035ff88feeb\" (UID: \"11efda36-8315-4323-b18e-a035ff88feeb\") " Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.678080 4925 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11efda36-8315-4323-b18e-a035ff88feeb-logs" (OuterVolumeSpecName: "logs") pod "11efda36-8315-4323-b18e-a035ff88feeb" (UID: "11efda36-8315-4323-b18e-a035ff88feeb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.712791 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11efda36-8315-4323-b18e-a035ff88feeb-kube-api-access-dbwv5" (OuterVolumeSpecName: "kube-api-access-dbwv5") pod "11efda36-8315-4323-b18e-a035ff88feeb" (UID: "11efda36-8315-4323-b18e-a035ff88feeb"). InnerVolumeSpecName "kube-api-access-dbwv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.716706 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "11efda36-8315-4323-b18e-a035ff88feeb" (UID: "11efda36-8315-4323-b18e-a035ff88feeb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.719721 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "11efda36-8315-4323-b18e-a035ff88feeb" (UID: "11efda36-8315-4323-b18e-a035ff88feeb"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.763791 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-config-data" (OuterVolumeSpecName: "config-data") pod "11efda36-8315-4323-b18e-a035ff88feeb" (UID: "11efda36-8315-4323-b18e-a035ff88feeb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.777337 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.777412 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.777458 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11efda36-8315-4323-b18e-a035ff88feeb-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.777475 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11efda36-8315-4323-b18e-a035ff88feeb-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:41 crc kubenswrapper[4925]: I0121 11:24:41.777491 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbwv5\" (UniqueName: \"kubernetes.io/projected/11efda36-8315-4323-b18e-a035ff88feeb-kube-api-access-dbwv5\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.049723 4925 generic.go:334] "Generic (PLEG): container finished" podID="11efda36-8315-4323-b18e-a035ff88feeb" containerID="33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132" exitCode=0 Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.049781 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"11efda36-8315-4323-b18e-a035ff88feeb","Type":"ContainerDied","Data":"33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132"} Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.049815 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"11efda36-8315-4323-b18e-a035ff88feeb","Type":"ContainerDied","Data":"fcde1d66bea57184b967f687f1e2d2e566a350527ecfbed74ce9487558612e5d"} Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.049833 4925 scope.go:117] "RemoveContainer" containerID="33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.049921 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.097948 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.098690 4925 scope.go:117] "RemoveContainer" containerID="33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132" Jan 21 11:24:42 crc kubenswrapper[4925]: E0121 11:24:42.099353 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132\": container with ID starting with 33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132 not found: ID does not exist" containerID="33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.099520 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132"} err="failed to get container status \"33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132\": rpc error: code = NotFound desc = could not find container \"33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132\": container with ID starting with 33acbd126ef3d412083d1c4f03907553c8d81a7e5d52cd35ed5fe23f425c3132 not found: ID does not exist" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.231991 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.245169 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:24:42 crc kubenswrapper[4925]: E0121 11:24:42.245653 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11efda36-8315-4323-b18e-a035ff88feeb" containerName="watcher-decision-engine" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.245688 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="11efda36-8315-4323-b18e-a035ff88feeb" containerName="watcher-decision-engine" Jan 21 11:24:42 crc kubenswrapper[4925]: E0121 11:24:42.245717 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" containerName="keystone-bootstrap" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.245724 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" containerName="keystone-bootstrap" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.245975 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="11efda36-8315-4323-b18e-a035ff88feeb" containerName="watcher-decision-engine" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.246006 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" containerName="keystone-bootstrap" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.246945 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.255577 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.256708 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.427761 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.427878 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.427932 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8xvt\" (UniqueName: \"kubernetes.io/projected/992e02ca-a0a4-4b4d-befa-33204230b0d1-kube-api-access-j8xvt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.428216 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.428467 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/992e02ca-a0a4-4b4d-befa-33204230b0d1-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.428538 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.532596 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/992e02ca-a0a4-4b4d-befa-33204230b0d1-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.532153 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/992e02ca-a0a4-4b4d-befa-33204230b0d1-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.532721 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.532933 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.533107 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.533560 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8xvt\" (UniqueName: \"kubernetes.io/projected/992e02ca-a0a4-4b4d-befa-33204230b0d1-kube-api-access-j8xvt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.533716 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.536330 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.536542 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.537079 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 
11:24:42.542912 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.554979 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8xvt\" (UniqueName: \"kubernetes.io/projected/992e02ca-a0a4-4b4d-befa-33204230b0d1-kube-api-access-j8xvt\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:42 crc kubenswrapper[4925]: I0121 11:24:42.579220 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:43 crc kubenswrapper[4925]: I0121 11:24:43.059752 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:24:43 crc kubenswrapper[4925]: I0121 11:24:43.465314 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:43 crc kubenswrapper[4925]: I0121 11:24:43.525029 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11efda36-8315-4323-b18e-a035ff88feeb" path="/var/lib/kubelet/pods/11efda36-8315-4323-b18e-a035ff88feeb/volumes" Jan 21 11:24:43 crc kubenswrapper[4925]: I0121 11:24:43.746308 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.112070 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.112119 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"992e02ca-a0a4-4b4d-befa-33204230b0d1","Type":"ContainerStarted","Data":"383d652ec75ba894d8751eebc2e366ffd642f6a1b5240320eb77ac0cf23e069a"} Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.112143 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"992e02ca-a0a4-4b4d-befa-33204230b0d1","Type":"ContainerStarted","Data":"c35f1d876671bd0cf7a8f8fae5fb803dc59f4a74f48f8a55b69cbc475d08a9dc"} Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.136538 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.215676 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.215650023 podStartE2EDuration="2.215650023s" podCreationTimestamp="2026-01-21 11:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:44.208760805 +0000 UTC m=+1775.812652739" watchObservedRunningTime="2026-01-21 11:24:44.215650023 +0000 UTC m=+1775.819541967" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.353578 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="watcher-kuttl-default/memcached-0" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.661230 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-6cf7c7c58-dd2hr"] Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.663126 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.664559 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-6cf7c7c58-dd2hr"] Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.828999 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cs4s\" (UniqueName: \"kubernetes.io/projected/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-kube-api-access-2cs4s\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.829054 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-credential-keys\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.829137 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-internal-tls-certs\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.829174 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-config-data\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.829195 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-scripts\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.829462 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-fernet-keys\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.829531 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-combined-ca-bundle\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.829677 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-public-tls-certs\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.829799 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-cert-memcached-mtls\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.930923 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cs4s\" (UniqueName: \"kubernetes.io/projected/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-kube-api-access-2cs4s\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.931470 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-credential-keys\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.931580 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-internal-tls-certs\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.931690 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-config-data\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.931776 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-scripts\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.931878 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-fernet-keys\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.931977 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-combined-ca-bundle\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.932099 4925 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-public-tls-certs\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.932209 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-cert-memcached-mtls\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.937465 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-credential-keys\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.938017 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-cert-memcached-mtls\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.938530 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-internal-tls-certs\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.942089 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-fernet-keys\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.943300 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-config-data\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.944957 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-public-tls-certs\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.945113 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-scripts\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.947592 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-combined-ca-bundle\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.962152 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cs4s\" (UniqueName: \"kubernetes.io/projected/86449d00-a2ae-4fb9-8529-e5a140d7b2f8-kube-api-access-2cs4s\") pod \"keystone-6cf7c7c58-dd2hr\" (UID: \"86449d00-a2ae-4fb9-8529-e5a140d7b2f8\") " pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:44 crc kubenswrapper[4925]: I0121 11:24:44.989001 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:45 crc kubenswrapper[4925]: I0121 11:24:45.502883 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:24:45 crc kubenswrapper[4925]: E0121 11:24:45.503559 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:24:45 crc kubenswrapper[4925]: I0121 11:24:45.528336 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:45 crc kubenswrapper[4925]: I0121 11:24:45.555742 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:24:45 crc kubenswrapper[4925]: I0121 11:24:45.615110 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-6cf7c7c58-dd2hr"] Jan 21 11:24:46 crc kubenswrapper[4925]: I0121 11:24:46.134992 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" event={"ID":"86449d00-a2ae-4fb9-8529-e5a140d7b2f8","Type":"ContainerStarted","Data":"00791c17b51d73f8937826b3f5128fdb48c813a22dc97573d279be134f80145a"} Jan 21 11:24:46 crc kubenswrapper[4925]: I0121 11:24:46.135759 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:24:46 crc kubenswrapper[4925]: I0121 11:24:46.135794 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" event={"ID":"86449d00-a2ae-4fb9-8529-e5a140d7b2f8","Type":"ContainerStarted","Data":"2d08d7e7e0de557719a2e61264661e6885b8f5b5f6eb7a8fd063c86ffcf859d9"} Jan 21 11:24:46 crc kubenswrapper[4925]: I0121 11:24:46.172793 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" podStartSLOduration=2.172772297 podStartE2EDuration="2.172772297s" podCreationTimestamp="2026-01-21 11:24:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:46.155013576 +0000 UTC m=+1777.758905530" watchObservedRunningTime="2026-01-21 11:24:46.172772297 +0000 UTC m=+1777.776664241" Jan 21 11:24:46 crc kubenswrapper[4925]: I0121 11:24:46.202128 4925 
Jan 21 11:24:49 crc kubenswrapper[4925]: I0121 11:24:49.647009 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:24:49 crc kubenswrapper[4925]: I0121 11:24:49.647913 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="58140070-f6f6-4498-88ce-78968081eaa0" containerName="watcher-kuttl-api-log" containerID="cri-o://ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812" gracePeriod=30
Jan 21 11:24:49 crc kubenswrapper[4925]: I0121 11:24:49.648019 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="58140070-f6f6-4498-88ce-78968081eaa0" containerName="watcher-api" containerID="cri-o://c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1" gracePeriod=30
Jan 21 11:24:50 crc kubenswrapper[4925]: I0121 11:24:50.174897 4925 generic.go:334] "Generic (PLEG): container finished" podID="58140070-f6f6-4498-88ce-78968081eaa0" containerID="ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812" exitCode=143
Jan 21 11:24:50 crc kubenswrapper[4925]: I0121 11:24:50.174979 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"58140070-f6f6-4498-88ce-78968081eaa0","Type":"ContainerDied","Data":"ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812"}
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.019052 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.198405 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-config-data\") pod \"58140070-f6f6-4498-88ce-78968081eaa0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") "
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.198461 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-custom-prometheus-ca\") pod \"58140070-f6f6-4498-88ce-78968081eaa0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") "
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.198505 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gb8z5\" (UniqueName: \"kubernetes.io/projected/58140070-f6f6-4498-88ce-78968081eaa0-kube-api-access-gb8z5\") pod \"58140070-f6f6-4498-88ce-78968081eaa0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") "
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.198576 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-public-tls-certs\") pod \"58140070-f6f6-4498-88ce-78968081eaa0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") "
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.198605 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-combined-ca-bundle\") pod \"58140070-f6f6-4498-88ce-78968081eaa0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") "
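exitCode=143 above follows the usual 128+signal encoding: watcher-kuttl-api-log exited on SIGTERM (15), the signal delivered at the start of the 30-second grace period, while watcher-api later exits 0 after a clean shutdown. A tiny decoder for that convention (illustrative helper, not kubelet code):

package main

import (
	"fmt"
	"syscall"
)

// describe decodes the shell convention kubelet surfaces in PLEG events:
// 0 is a clean exit, 1-127 an application error, 128+N death by signal N.
func describe(exitCode int) string {
	switch {
	case exitCode == 0:
		return "clean exit"
	case exitCode > 128:
		sig := syscall.Signal(exitCode - 128)
		return fmt.Sprintf("killed by signal %d (%v)", exitCode-128, sig)
	default:
		return fmt.Sprintf("application error %d", exitCode)
	}
}

func main() {
	fmt.Println(143, "->", describe(143)) // SIGTERM: the grace-period kill seen above
	fmt.Println(0, "->", describe(0))     // watcher-api's clean shutdown
	fmt.Println(2, "->", describe(2))     // sg-core's error exit later in this log
}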
\"58140070-f6f6-4498-88ce-78968081eaa0\") " Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.198667 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58140070-f6f6-4498-88ce-78968081eaa0-logs\") pod \"58140070-f6f6-4498-88ce-78968081eaa0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.198703 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-cert-memcached-mtls\") pod \"58140070-f6f6-4498-88ce-78968081eaa0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.198806 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-internal-tls-certs\") pod \"58140070-f6f6-4498-88ce-78968081eaa0\" (UID: \"58140070-f6f6-4498-88ce-78968081eaa0\") " Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.200089 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58140070-f6f6-4498-88ce-78968081eaa0-logs" (OuterVolumeSpecName: "logs") pod "58140070-f6f6-4498-88ce-78968081eaa0" (UID: "58140070-f6f6-4498-88ce-78968081eaa0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.214240 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58140070-f6f6-4498-88ce-78968081eaa0-kube-api-access-gb8z5" (OuterVolumeSpecName: "kube-api-access-gb8z5") pod "58140070-f6f6-4498-88ce-78968081eaa0" (UID: "58140070-f6f6-4498-88ce-78968081eaa0"). InnerVolumeSpecName "kube-api-access-gb8z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.229999 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "58140070-f6f6-4498-88ce-78968081eaa0" (UID: "58140070-f6f6-4498-88ce-78968081eaa0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.244787 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "58140070-f6f6-4498-88ce-78968081eaa0" (UID: "58140070-f6f6-4498-88ce-78968081eaa0"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.259291 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-config-data" (OuterVolumeSpecName: "config-data") pod "58140070-f6f6-4498-88ce-78968081eaa0" (UID: "58140070-f6f6-4498-88ce-78968081eaa0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.263379 4925 generic.go:334] "Generic (PLEG): container finished" podID="58140070-f6f6-4498-88ce-78968081eaa0" containerID="c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1" exitCode=0 Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.263633 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"58140070-f6f6-4498-88ce-78968081eaa0","Type":"ContainerDied","Data":"c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1"} Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.263789 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"58140070-f6f6-4498-88ce-78968081eaa0","Type":"ContainerDied","Data":"0d6285c180faf2bbcaf951a940f48763016709641c5a9909f205447cc565ca03"} Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.263920 4925 scope.go:117] "RemoveContainer" containerID="c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.264239 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.280869 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "58140070-f6f6-4498-88ce-78968081eaa0" (UID: "58140070-f6f6-4498-88ce-78968081eaa0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.301550 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.301611 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.301628 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gb8z5\" (UniqueName: \"kubernetes.io/projected/58140070-f6f6-4498-88ce-78968081eaa0-kube-api-access-gb8z5\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.301644 4925 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.301656 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.301668 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58140070-f6f6-4498-88ce-78968081eaa0-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.303063 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "58140070-f6f6-4498-88ce-78968081eaa0" (UID: "58140070-f6f6-4498-88ce-78968081eaa0"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.303271 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "58140070-f6f6-4498-88ce-78968081eaa0" (UID: "58140070-f6f6-4498-88ce-78968081eaa0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.324493 4925 scope.go:117] "RemoveContainer" containerID="ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.347564 4925 scope.go:117] "RemoveContainer" containerID="c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1" Jan 21 11:24:52 crc kubenswrapper[4925]: E0121 11:24:52.348265 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1\": container with ID starting with c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1 not found: ID does not exist" containerID="c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.348302 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1"} err="failed to get container status \"c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1\": rpc error: code = NotFound desc = could not find container \"c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1\": container with ID starting with c81154f3fb28ab834ef1341747fab09fb06b3ac803936563c30a131c8a5907a1 not found: ID does not exist" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.348343 4925 scope.go:117] "RemoveContainer" containerID="ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812" Jan 21 11:24:52 crc kubenswrapper[4925]: E0121 11:24:52.348976 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812\": container with ID starting with ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812 not found: ID does not exist" containerID="ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.349149 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812"} err="failed to get container status \"ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812\": rpc error: code = NotFound desc = could not find container \"ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812\": container with ID starting with ad20b25dc05eef0642b9f56d35e4823af502a6ba53d3f43b7cae4af600d2e812 not found: ID does not exist" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.403362 4925 reconciler_common.go:293] "Volume detached for volume 
\"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.403435 4925 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58140070-f6f6-4498-88ce-78968081eaa0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.579783 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.611509 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.634227 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.661022 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.677030 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:24:52 crc kubenswrapper[4925]: E0121 11:24:52.677499 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58140070-f6f6-4498-88ce-78968081eaa0" containerName="watcher-api" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.677520 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="58140070-f6f6-4498-88ce-78968081eaa0" containerName="watcher-api" Jan 21 11:24:52 crc kubenswrapper[4925]: E0121 11:24:52.677532 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58140070-f6f6-4498-88ce-78968081eaa0" containerName="watcher-kuttl-api-log" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.677540 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="58140070-f6f6-4498-88ce-78968081eaa0" containerName="watcher-kuttl-api-log" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.677787 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="58140070-f6f6-4498-88ce-78968081eaa0" containerName="watcher-api" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.677807 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="58140070-f6f6-4498-88ce-78968081eaa0" containerName="watcher-kuttl-api-log" Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.678864 4925 util.go:30] "No sandbox for pod can be found. 
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.681224 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.688329 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.810712 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.810793 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8ee3340-dbf7-4080-90e8-d179aaebd42a-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.810844 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8wf8\" (UniqueName: \"kubernetes.io/projected/b8ee3340-dbf7-4080-90e8-d179aaebd42a-kube-api-access-m8wf8\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.810882 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.810980 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.811062 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.912503 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.912939 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8ee3340-dbf7-4080-90e8-d179aaebd42a-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.913095 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8wf8\" (UniqueName: \"kubernetes.io/projected/b8ee3340-dbf7-4080-90e8-d179aaebd42a-kube-api-access-m8wf8\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.913214 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.913353 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8ee3340-dbf7-4080-90e8-d179aaebd42a-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.913496 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.913659 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.917474 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.917580 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.918110 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.920034 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:52 crc kubenswrapper[4925]: I0121 11:24:52.944051 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8wf8\" (UniqueName: \"kubernetes.io/projected/b8ee3340-dbf7-4080-90e8-d179aaebd42a-kube-api-access-m8wf8\") pod \"watcher-kuttl-api-0\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:53 crc kubenswrapper[4925]: I0121 11:24:52.999986 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:53 crc kubenswrapper[4925]: I0121 11:24:53.350411 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:24:53 crc kubenswrapper[4925]: I0121 11:24:53.525155 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58140070-f6f6-4498-88ce-78968081eaa0" path="/var/lib/kubelet/pods/58140070-f6f6-4498-88ce-78968081eaa0/volumes"
Jan 21 11:24:53 crc kubenswrapper[4925]: I0121 11:24:53.525867 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:24:53 crc kubenswrapper[4925]: I0121 11:24:53.561507 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"]
Jan 21 11:24:54 crc kubenswrapper[4925]: I0121 11:24:54.372753 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b8ee3340-dbf7-4080-90e8-d179aaebd42a","Type":"ContainerStarted","Data":"31bf633a2d829c9e7056a2e2f6db7505030974dcf5cb195d8a7b40359aff3499"}
Jan 21 11:24:54 crc kubenswrapper[4925]: I0121 11:24:54.373129 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0"
Jan 21 11:24:54 crc kubenswrapper[4925]: I0121 11:24:54.373144 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b8ee3340-dbf7-4080-90e8-d179aaebd42a","Type":"ContainerStarted","Data":"af0133f221b82f8e8d7d1bfd4b72c8c8398b721b9d116ba44ded90d3c40886b5"}
Jan 21 11:24:54 crc kubenswrapper[4925]: I0121 11:24:54.373154 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b8ee3340-dbf7-4080-90e8-d179aaebd42a","Type":"ContainerStarted","Data":"6f5f3e05e8437eff910f5101f2f6d9076a3199b6b7d7249a116a1b56d61ba93c"}
Jan 21 11:24:54 crc kubenswrapper[4925]: I0121 11:24:54.375416 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.161:9322/\": dial tcp 10.217.0.161:9322: connect: connection refused"
Jan 21 11:24:54 crc kubenswrapper[4925]: I0121 11:24:54.415000 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.414970605 podStartE2EDuration="2.414970605s" podCreationTimestamp="2026-01-21 11:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:24:54.396961346 +0000 UTC m=+1786.000853290" watchObservedRunningTime="2026-01-21 11:24:54.414970605 +0000 UTC m=+1786.018862539"
Jan 21 11:24:58 crc kubenswrapper[4925]: I0121 11:24:58.000802 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0"
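The single failed readiness probe above is the ordinary race between container start and the HTTP server binding :9322; the next probe succeeds and the pod flips to ready at 11:24:58. A minimal HTTP check in the same spirit as an httpGet readiness probe, where a 2xx-3xx status passes and transport errors fail (the URL and timeout are taken from or assumed for illustration; kubelet's prober additionally applies the probe's success/failure thresholds):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs one HTTP GET the way an httpGet readiness probe does:
// any 2xx-3xx status is a success; transport errors (e.g. connection
// refused while the server is still starting) are failures.
func probe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "dial tcp 10.217.0.161:9322: connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil
	}
	return fmt.Errorf("probe failed with status %d", resp.StatusCode)
}

func main() {
	if err := probe("http://10.217.0.161:9322/"); err != nil {
		fmt.Println("Probe failed:", err) // expected until watcher-api binds the port
	}
}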
pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:24:58 crc kubenswrapper[4925]: I0121 11:24:58.718069 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:25:00 crc kubenswrapper[4925]: I0121 11:25:00.715306 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:25:00 crc kubenswrapper[4925]: E0121 11:25:00.715909 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:25:03 crc kubenswrapper[4925]: I0121 11:25:03.000346 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:25:03 crc kubenswrapper[4925]: I0121 11:25:03.007002 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:25:03 crc kubenswrapper[4925]: I0121 11:25:03.621990 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:25:11 crc kubenswrapper[4925]: I0121 11:25:11.325911 4925 scope.go:117] "RemoveContainer" containerID="41f8a4515161523db7491d9b04231422b1096aad3dc3c3a4038c5a7efa0fdd89" Jan 21 11:25:14 crc kubenswrapper[4925]: I0121 11:25:14.502512 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:25:14 crc kubenswrapper[4925]: E0121 11:25:14.503467 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:25:17 crc kubenswrapper[4925]: I0121 11:25:17.232012 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/keystone-6cf7c7c58-dd2hr" Jan 21 11:25:17 crc kubenswrapper[4925]: I0121 11:25:17.427955 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-665b4c6f5-gzz77"] Jan 21 11:25:17 crc kubenswrapper[4925]: I0121 11:25:17.428239 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" podUID="cac93b53-352f-4f28-a456-b80df0aa2670" containerName="keystone-api" containerID="cri-o://0ccbe5d1bab3ab9004e7695c4ce67a3a4372062e630d2bc5f444e0c3b2ba84c6" gracePeriod=30 Jan 21 11:25:20 crc kubenswrapper[4925]: I0121 11:25:20.892385 4925 generic.go:334] "Generic (PLEG): container finished" podID="cac93b53-352f-4f28-a456-b80df0aa2670" containerID="0ccbe5d1bab3ab9004e7695c4ce67a3a4372062e630d2bc5f444e0c3b2ba84c6" exitCode=0 Jan 21 11:25:20 crc kubenswrapper[4925]: I0121 11:25:20.892492 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" 
event={"ID":"cac93b53-352f-4f28-a456-b80df0aa2670","Type":"ContainerDied","Data":"0ccbe5d1bab3ab9004e7695c4ce67a3a4372062e630d2bc5f444e0c3b2ba84c6"} Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.230793 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.336578 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-internal-tls-certs\") pod \"cac93b53-352f-4f28-a456-b80df0aa2670\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.337042 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-config-data\") pod \"cac93b53-352f-4f28-a456-b80df0aa2670\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.337075 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-public-tls-certs\") pod \"cac93b53-352f-4f28-a456-b80df0aa2670\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.337150 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-scripts\") pod \"cac93b53-352f-4f28-a456-b80df0aa2670\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.337180 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-combined-ca-bundle\") pod \"cac93b53-352f-4f28-a456-b80df0aa2670\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.337284 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6m5gg\" (UniqueName: \"kubernetes.io/projected/cac93b53-352f-4f28-a456-b80df0aa2670-kube-api-access-6m5gg\") pod \"cac93b53-352f-4f28-a456-b80df0aa2670\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.337316 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-credential-keys\") pod \"cac93b53-352f-4f28-a456-b80df0aa2670\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.337364 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-fernet-keys\") pod \"cac93b53-352f-4f28-a456-b80df0aa2670\" (UID: \"cac93b53-352f-4f28-a456-b80df0aa2670\") " Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.345659 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "cac93b53-352f-4f28-a456-b80df0aa2670" (UID: "cac93b53-352f-4f28-a456-b80df0aa2670"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.345685 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-scripts" (OuterVolumeSpecName: "scripts") pod "cac93b53-352f-4f28-a456-b80df0aa2670" (UID: "cac93b53-352f-4f28-a456-b80df0aa2670"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.345895 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cac93b53-352f-4f28-a456-b80df0aa2670-kube-api-access-6m5gg" (OuterVolumeSpecName: "kube-api-access-6m5gg") pod "cac93b53-352f-4f28-a456-b80df0aa2670" (UID: "cac93b53-352f-4f28-a456-b80df0aa2670"). InnerVolumeSpecName "kube-api-access-6m5gg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.348134 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "cac93b53-352f-4f28-a456-b80df0aa2670" (UID: "cac93b53-352f-4f28-a456-b80df0aa2670"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.388999 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-config-data" (OuterVolumeSpecName: "config-data") pod "cac93b53-352f-4f28-a456-b80df0aa2670" (UID: "cac93b53-352f-4f28-a456-b80df0aa2670"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.389917 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cac93b53-352f-4f28-a456-b80df0aa2670" (UID: "cac93b53-352f-4f28-a456-b80df0aa2670"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.391936 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cac93b53-352f-4f28-a456-b80df0aa2670" (UID: "cac93b53-352f-4f28-a456-b80df0aa2670"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.400613 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cac93b53-352f-4f28-a456-b80df0aa2670" (UID: "cac93b53-352f-4f28-a456-b80df0aa2670"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.439489 4925 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.439539 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.439550 4925 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.439561 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.439575 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.439589 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6m5gg\" (UniqueName: \"kubernetes.io/projected/cac93b53-352f-4f28-a456-b80df0aa2670-kube-api-access-6m5gg\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.439601 4925 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.439611 4925 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cac93b53-352f-4f28-a456-b80df0aa2670-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.905141 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" event={"ID":"cac93b53-352f-4f28-a456-b80df0aa2670","Type":"ContainerDied","Data":"72e0783ccda2a2894004575b9ac6b1711e66a0188ddb127e8e8657abf5b62e77"} Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.905230 4925 scope.go:117] "RemoveContainer" containerID="0ccbe5d1bab3ab9004e7695c4ce67a3a4372062e630d2bc5f444e0c3b2ba84c6" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.905454 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-665b4c6f5-gzz77" Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.962064 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-665b4c6f5-gzz77"] Jan 21 11:25:21 crc kubenswrapper[4925]: I0121 11:25:21.970389 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-665b4c6f5-gzz77"] Jan 21 11:25:23 crc kubenswrapper[4925]: I0121 11:25:23.514598 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cac93b53-352f-4f28-a456-b80df0aa2670" path="/var/lib/kubelet/pods/cac93b53-352f-4f28-a456-b80df0aa2670/volumes" Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.783720 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.784626 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="sg-core" containerID="cri-o://b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571" gracePeriod=30 Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.784678 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="ceilometer-notification-agent" containerID="cri-o://92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64" gracePeriod=30 Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.784676 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="proxy-httpd" containerID="cri-o://8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc" gracePeriod=30 Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.784563 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="ceilometer-central-agent" containerID="cri-o://c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83" gracePeriod=30 Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.941214 4925 generic.go:334] "Generic (PLEG): container finished" podID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerID="8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc" exitCode=0 Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.941659 4925 generic.go:334] "Generic (PLEG): container finished" podID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerID="b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571" exitCode=2 Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.941283 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerDied","Data":"8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc"} Jan 21 11:25:24 crc kubenswrapper[4925]: I0121 11:25:24.942129 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerDied","Data":"b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571"} Jan 21 11:25:25 crc kubenswrapper[4925]: I0121 11:25:25.502149 4925 scope.go:117] "RemoveContainer" 
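Deleting ceilometer-0 kills all four of its containers concurrently, each with the same 30-second grace period: SIGTERM first, SIGKILL only if the deadline expires. proxy-httpd exits 0 and sg-core exits 2 within a second, while ceilometer-central-agent and ceilometer-notification-agent take longer (their ContainerDied events follow below). A sketch of the per-container kill with a grace deadline, using a channel as a process-level stand-in for the runtime's stop call (illustrative, not the CRI API):

package main

import (
	"fmt"
	"sync"
	"time"
)

// stopContainer models kubelet's graceful kill: deliver SIGTERM, wait up to
// gracePeriod for exit, then escalate to SIGKILL. The runtime is mocked by a
// channel that closes when the "container" process exits.
func stopContainer(name string, exited <-chan struct{}, gracePeriod time.Duration, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Printf("Killing container with a grace period: %s (gracePeriod=%v)\n", name, gracePeriod)
	select {
	case <-exited:
		fmt.Printf("%s exited within the grace period\n", name)
	case <-time.After(gracePeriod):
		fmt.Printf("%s did not stop in time; sending SIGKILL\n", name)
	}
}

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"sg-core", "ceilometer-notification-agent", "proxy-httpd", "ceilometer-central-agent"} {
		exited := make(chan struct{})
		go func() { time.Sleep(200 * time.Millisecond); close(exited) }() // simulate a clean shutdown
		wg.Add(1)
		go stopContainer(name, exited, 30*time.Second, &wg) // all four are stopped in parallel
	}
	wg.Wait()
}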
containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:25:25 crc kubenswrapper[4925]: E0121 11:25:25.502593 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:25:25 crc kubenswrapper[4925]: I0121 11:25:25.956656 4925 generic.go:334] "Generic (PLEG): container finished" podID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerID="c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83" exitCode=0 Jan 21 11:25:25 crc kubenswrapper[4925]: I0121 11:25:25.956717 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerDied","Data":"c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83"} Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.859347 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.976821 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-log-httpd\") pod \"ad5ac110-8390-43fe-9250-2c304e1d6490\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.976891 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-run-httpd\") pod \"ad5ac110-8390-43fe-9250-2c304e1d6490\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.976922 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-config-data\") pod \"ad5ac110-8390-43fe-9250-2c304e1d6490\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.976950 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-scripts\") pod \"ad5ac110-8390-43fe-9250-2c304e1d6490\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.977020 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mhfs\" (UniqueName: \"kubernetes.io/projected/ad5ac110-8390-43fe-9250-2c304e1d6490-kube-api-access-7mhfs\") pod \"ad5ac110-8390-43fe-9250-2c304e1d6490\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.977095 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-ceilometer-tls-certs\") pod \"ad5ac110-8390-43fe-9250-2c304e1d6490\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.977182 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-combined-ca-bundle\") pod \"ad5ac110-8390-43fe-9250-2c304e1d6490\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.977282 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-sg-core-conf-yaml\") pod \"ad5ac110-8390-43fe-9250-2c304e1d6490\" (UID: \"ad5ac110-8390-43fe-9250-2c304e1d6490\") " Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.977370 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ad5ac110-8390-43fe-9250-2c304e1d6490" (UID: "ad5ac110-8390-43fe-9250-2c304e1d6490"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.977499 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ad5ac110-8390-43fe-9250-2c304e1d6490" (UID: "ad5ac110-8390-43fe-9250-2c304e1d6490"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.978093 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.978126 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad5ac110-8390-43fe-9250-2c304e1d6490-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.993077 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad5ac110-8390-43fe-9250-2c304e1d6490-kube-api-access-7mhfs" (OuterVolumeSpecName: "kube-api-access-7mhfs") pod "ad5ac110-8390-43fe-9250-2c304e1d6490" (UID: "ad5ac110-8390-43fe-9250-2c304e1d6490"). InnerVolumeSpecName "kube-api-access-7mhfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:25:31 crc kubenswrapper[4925]: I0121 11:25:31.995496 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-scripts" (OuterVolumeSpecName: "scripts") pod "ad5ac110-8390-43fe-9250-2c304e1d6490" (UID: "ad5ac110-8390-43fe-9250-2c304e1d6490"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.010579 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ad5ac110-8390-43fe-9250-2c304e1d6490" (UID: "ad5ac110-8390-43fe-9250-2c304e1d6490"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.025223 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "ad5ac110-8390-43fe-9250-2c304e1d6490" (UID: "ad5ac110-8390-43fe-9250-2c304e1d6490"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.049442 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad5ac110-8390-43fe-9250-2c304e1d6490" (UID: "ad5ac110-8390-43fe-9250-2c304e1d6490"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.079847 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.079906 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mhfs\" (UniqueName: \"kubernetes.io/projected/ad5ac110-8390-43fe-9250-2c304e1d6490-kube-api-access-7mhfs\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.079924 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.079937 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.079951 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.090727 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-config-data" (OuterVolumeSpecName: "config-data") pod "ad5ac110-8390-43fe-9250-2c304e1d6490" (UID: "ad5ac110-8390-43fe-9250-2c304e1d6490"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.124895 4925 generic.go:334] "Generic (PLEG): container finished" podID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerID="92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64" exitCode=0 Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.124939 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerDied","Data":"92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64"} Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.124968 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"ad5ac110-8390-43fe-9250-2c304e1d6490","Type":"ContainerDied","Data":"cb160d0db15e3b8f60a15a6cd182c5f20672b7294cd9ea7d645730919f32c5fd"} Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.125039 4925 scope.go:117] "RemoveContainer" containerID="8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.125216 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.156411 4925 scope.go:117] "RemoveContainer" containerID="b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.178627 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.185484 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad5ac110-8390-43fe-9250-2c304e1d6490-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.186692 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.206928 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.207478 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="sg-core" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207501 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="sg-core" Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.207514 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="ceilometer-central-agent" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207522 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="ceilometer-central-agent" Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.207553 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="proxy-httpd" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207577 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="proxy-httpd" Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.207597 4925 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="cac93b53-352f-4f28-a456-b80df0aa2670" containerName="keystone-api" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207606 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="cac93b53-352f-4f28-a456-b80df0aa2670" containerName="keystone-api" Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.207620 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="ceilometer-notification-agent" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207626 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="ceilometer-notification-agent" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207805 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="cac93b53-352f-4f28-a456-b80df0aa2670" containerName="keystone-api" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207824 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="ceilometer-central-agent" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207840 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="sg-core" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207847 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="proxy-httpd" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.207856 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" containerName="ceilometer-notification-agent" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.208892 4925 scope.go:117] "RemoveContainer" containerID="92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.209654 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.212143 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.213002 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.213024 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.244268 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.260833 4925 scope.go:117] "RemoveContainer" containerID="c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.289655 4925 scope.go:117] "RemoveContainer" containerID="8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc" Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.290338 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc\": container with ID starting with 8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc not found: ID does not exist" containerID="8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.290512 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc"} err="failed to get container status \"8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc\": rpc error: code = NotFound desc = could not find container \"8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc\": container with ID starting with 8e5202ca2985b77c83fae1cb5cae4f80721c8cf20d309f6139a98d92015224cc not found: ID does not exist" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.290552 4925 scope.go:117] "RemoveContainer" containerID="b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571" Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.290974 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571\": container with ID starting with b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571 not found: ID does not exist" containerID="b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.291022 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571"} err="failed to get container status \"b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571\": rpc error: code = NotFound desc = could not find container \"b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571\": container with ID starting with b484cb879a1ca1db556002fd042be0d83f7d4b0bf2d97e8b14eb93546ee3a571 not found: ID does not exist" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.291054 4925 scope.go:117] "RemoveContainer" 
containerID="92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64" Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.291427 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64\": container with ID starting with 92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64 not found: ID does not exist" containerID="92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.291468 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64"} err="failed to get container status \"92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64\": rpc error: code = NotFound desc = could not find container \"92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64\": container with ID starting with 92659f78e76f2a473a8f4e37612dacca4f1ac4f564f92ef76577b3b535a6cf64 not found: ID does not exist" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.291511 4925 scope.go:117] "RemoveContainer" containerID="c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83" Jan 21 11:25:32 crc kubenswrapper[4925]: E0121 11:25:32.291952 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83\": container with ID starting with c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83 not found: ID does not exist" containerID="c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.291980 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83"} err="failed to get container status \"c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83\": rpc error: code = NotFound desc = could not find container \"c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83\": container with ID starting with c22644464b73f0264bb6baf1eb5a173bb3d4ae4286bd086b62293fcae50f0e83 not found: ID does not exist" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.461121 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bf8c6\" (UniqueName: \"kubernetes.io/projected/f0dc5004-1da5-4397-a12a-57120943250e-kube-api-access-bf8c6\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.461582 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-run-httpd\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.461832 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-log-httpd\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc 
kubenswrapper[4925]: I0121 11:25:32.462091 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.462343 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-scripts\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.462507 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.462636 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.462977 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-config-data\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.564916 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.565619 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.565789 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-config-data\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.566037 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bf8c6\" (UniqueName: \"kubernetes.io/projected/f0dc5004-1da5-4397-a12a-57120943250e-kube-api-access-bf8c6\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.566194 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-run-httpd\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.566832 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-run-httpd\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.566964 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-log-httpd\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.567161 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.567373 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-scripts\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.567554 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-log-httpd\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.570474 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.570539 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.571627 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.572001 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-scripts\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.581875 4925 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-config-data\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.584903 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bf8c6\" (UniqueName: \"kubernetes.io/projected/f0dc5004-1da5-4397-a12a-57120943250e-kube-api-access-bf8c6\") pod \"ceilometer-0\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:32 crc kubenswrapper[4925]: I0121 11:25:32.874012 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:33 crc kubenswrapper[4925]: I0121 11:25:33.439190 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:25:33 crc kubenswrapper[4925]: I0121 11:25:33.455330 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 11:25:33 crc kubenswrapper[4925]: I0121 11:25:33.519256 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad5ac110-8390-43fe-9250-2c304e1d6490" path="/var/lib/kubelet/pods/ad5ac110-8390-43fe-9250-2c304e1d6490/volumes" Jan 21 11:25:34 crc kubenswrapper[4925]: I0121 11:25:34.160960 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerStarted","Data":"cae1af74f635b2490a4a5226b82615add1f9ede8c39645653d4ba2e4becffbc8"} Jan 21 11:25:35 crc kubenswrapper[4925]: I0121 11:25:35.170517 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerStarted","Data":"72a020f373b3fbe14979c6a545daba8ca789df6b9c42a9a86e5fb1c30670a47d"} Jan 21 11:25:37 crc kubenswrapper[4925]: I0121 11:25:37.191182 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerStarted","Data":"cc0375e0f1ae04167d986b8b109281d10dfb375710376a55b880c07fc5e7d321"} Jan 21 11:25:37 crc kubenswrapper[4925]: I0121 11:25:37.507048 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:25:37 crc kubenswrapper[4925]: E0121 11:25:37.507351 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:25:39 crc kubenswrapper[4925]: I0121 11:25:39.214041 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerStarted","Data":"92f4dbaa73a9f2a272c09f85d5e444f540b2176bdb4cc9a18bf29642c7f6c8ee"} Jan 21 11:25:40 crc kubenswrapper[4925]: I0121 11:25:40.227635 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" 
event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerStarted","Data":"81ad7533bb89ecf78ccd3736ea8675d42e6fc92e79fd092b9383bfefd290c4e2"} Jan 21 11:25:40 crc kubenswrapper[4925]: I0121 11:25:40.229291 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:25:40 crc kubenswrapper[4925]: I0121 11:25:40.256354 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.7860271559999998 podStartE2EDuration="8.256300172s" podCreationTimestamp="2026-01-21 11:25:32 +0000 UTC" firstStartedPulling="2026-01-21 11:25:33.454822728 +0000 UTC m=+1825.058714662" lastFinishedPulling="2026-01-21 11:25:39.925095744 +0000 UTC m=+1831.528987678" observedRunningTime="2026-01-21 11:25:40.250411207 +0000 UTC m=+1831.854303151" watchObservedRunningTime="2026-01-21 11:25:40.256300172 +0000 UTC m=+1831.860192106" Jan 21 11:25:52 crc kubenswrapper[4925]: I0121 11:25:52.501359 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:25:52 crc kubenswrapper[4925]: E0121 11:25:52.502158 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:26:02 crc kubenswrapper[4925]: I0121 11:26:02.892220 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:26:03 crc kubenswrapper[4925]: I0121 11:26:03.502994 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:26:03 crc kubenswrapper[4925]: E0121 11:26:03.503547 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.166063 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd"] Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.177218 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-xbbgd"] Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.243895 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher9466-account-delete-4s6rs"] Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.245411 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.253096 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.253421 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="008045d1-ce08-49f5-b980-d62f4d3e96ba" containerName="watcher-applier" containerID="cri-o://32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" gracePeriod=30 Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.280494 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher9466-account-delete-4s6rs"] Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.324265 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.324561 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-kuttl-api-log" containerID="cri-o://af0133f221b82f8e8d7d1bfd4b72c8c8398b721b9d116ba44ded90d3c40886b5" gracePeriod=30 Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.325242 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-api" containerID="cri-o://31bf633a2d829c9e7056a2e2f6db7505030974dcf5cb195d8a7b40359aff3499" gracePeriod=30 Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.403485 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.403914 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="992e02ca-a0a4-4b4d-befa-33204230b0d1" containerName="watcher-decision-engine" containerID="cri-o://383d652ec75ba894d8751eebc2e366ffd642f6a1b5240320eb77ac0cf23e069a" gracePeriod=30 Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.409609 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djb64\" (UniqueName: \"kubernetes.io/projected/0de4d37c-e2ca-42a1-9db4-782daf24105f-kube-api-access-djb64\") pod \"watcher9466-account-delete-4s6rs\" (UID: \"0de4d37c-e2ca-42a1-9db4-782daf24105f\") " pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.409737 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0de4d37c-e2ca-42a1-9db4-782daf24105f-operator-scripts\") pod \"watcher9466-account-delete-4s6rs\" (UID: \"0de4d37c-e2ca-42a1-9db4-782daf24105f\") " pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.512446 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0de4d37c-e2ca-42a1-9db4-782daf24105f-operator-scripts\") pod \"watcher9466-account-delete-4s6rs\" (UID: \"0de4d37c-e2ca-42a1-9db4-782daf24105f\") " 
pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.512757 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djb64\" (UniqueName: \"kubernetes.io/projected/0de4d37c-e2ca-42a1-9db4-782daf24105f-kube-api-access-djb64\") pod \"watcher9466-account-delete-4s6rs\" (UID: \"0de4d37c-e2ca-42a1-9db4-782daf24105f\") " pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.514153 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0de4d37c-e2ca-42a1-9db4-782daf24105f-operator-scripts\") pod \"watcher9466-account-delete-4s6rs\" (UID: \"0de4d37c-e2ca-42a1-9db4-782daf24105f\") " pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.518184 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc9ed833-63d7-4e23-bc30-bd9cf2722903" path="/var/lib/kubelet/pods/cc9ed833-63d7-4e23-bc30-bd9cf2722903/volumes" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.540828 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djb64\" (UniqueName: \"kubernetes.io/projected/0de4d37c-e2ca-42a1-9db4-782daf24105f-kube-api-access-djb64\") pod \"watcher9466-account-delete-4s6rs\" (UID: \"0de4d37c-e2ca-42a1-9db4-782daf24105f\") " pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.566364 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.749182 4925 generic.go:334] "Generic (PLEG): container finished" podID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerID="af0133f221b82f8e8d7d1bfd4b72c8c8398b721b9d116ba44ded90d3c40886b5" exitCode=143 Jan 21 11:26:09 crc kubenswrapper[4925]: I0121 11:26:09.749293 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b8ee3340-dbf7-4080-90e8-d179aaebd42a","Type":"ContainerDied","Data":"af0133f221b82f8e8d7d1bfd4b72c8c8398b721b9d116ba44ded90d3c40886b5"} Jan 21 11:26:10 crc kubenswrapper[4925]: I0121 11:26:10.281127 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher9466-account-delete-4s6rs"] Jan 21 11:26:10 crc kubenswrapper[4925]: E0121 11:26:10.532774 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:26:10 crc kubenswrapper[4925]: E0121 11:26:10.537421 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:26:10 crc kubenswrapper[4925]: E0121 11:26:10.539088 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:26:10 crc kubenswrapper[4925]: E0121 11:26:10.539146 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="008045d1-ce08-49f5-b980-d62f4d3e96ba" containerName="watcher-applier" Jan 21 11:26:10 crc kubenswrapper[4925]: I0121 11:26:10.769753 4925 generic.go:334] "Generic (PLEG): container finished" podID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerID="31bf633a2d829c9e7056a2e2f6db7505030974dcf5cb195d8a7b40359aff3499" exitCode=0 Jan 21 11:26:10 crc kubenswrapper[4925]: I0121 11:26:10.769799 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b8ee3340-dbf7-4080-90e8-d179aaebd42a","Type":"ContainerDied","Data":"31bf633a2d829c9e7056a2e2f6db7505030974dcf5cb195d8a7b40359aff3499"} Jan 21 11:26:10 crc kubenswrapper[4925]: I0121 11:26:10.781358 4925 generic.go:334] "Generic (PLEG): container finished" podID="0de4d37c-e2ca-42a1-9db4-782daf24105f" containerID="d2b72003c42b57064ec2add004d3461e5f0168dc7808e3ae51c083168392d1dd" exitCode=0 Jan 21 11:26:10 crc kubenswrapper[4925]: I0121 11:26:10.781477 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" event={"ID":"0de4d37c-e2ca-42a1-9db4-782daf24105f","Type":"ContainerDied","Data":"d2b72003c42b57064ec2add004d3461e5f0168dc7808e3ae51c083168392d1dd"} Jan 21 11:26:10 crc kubenswrapper[4925]: I0121 11:26:10.781515 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" event={"ID":"0de4d37c-e2ca-42a1-9db4-782daf24105f","Type":"ContainerStarted","Data":"7b8f3e72aafb6abe1e20352f9c9310f437d557121331d88d46e1df4dad855c5e"} Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.194833 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.359279 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-custom-prometheus-ca\") pod \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.359408 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-config-data\") pod \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.359494 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-cert-memcached-mtls\") pod \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.359543 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8wf8\" (UniqueName: \"kubernetes.io/projected/b8ee3340-dbf7-4080-90e8-d179aaebd42a-kube-api-access-m8wf8\") pod \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.359571 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-combined-ca-bundle\") pod \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.359620 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8ee3340-dbf7-4080-90e8-d179aaebd42a-logs\") pod \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\" (UID: \"b8ee3340-dbf7-4080-90e8-d179aaebd42a\") " Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.360609 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8ee3340-dbf7-4080-90e8-d179aaebd42a-logs" (OuterVolumeSpecName: "logs") pod "b8ee3340-dbf7-4080-90e8-d179aaebd42a" (UID: "b8ee3340-dbf7-4080-90e8-d179aaebd42a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.379675 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8ee3340-dbf7-4080-90e8-d179aaebd42a-kube-api-access-m8wf8" (OuterVolumeSpecName: "kube-api-access-m8wf8") pod "b8ee3340-dbf7-4080-90e8-d179aaebd42a" (UID: "b8ee3340-dbf7-4080-90e8-d179aaebd42a"). InnerVolumeSpecName "kube-api-access-m8wf8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.411815 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "b8ee3340-dbf7-4080-90e8-d179aaebd42a" (UID: "b8ee3340-dbf7-4080-90e8-d179aaebd42a"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.422342 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8ee3340-dbf7-4080-90e8-d179aaebd42a" (UID: "b8ee3340-dbf7-4080-90e8-d179aaebd42a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.439652 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-config-data" (OuterVolumeSpecName: "config-data") pod "b8ee3340-dbf7-4080-90e8-d179aaebd42a" (UID: "b8ee3340-dbf7-4080-90e8-d179aaebd42a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.462602 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8wf8\" (UniqueName: \"kubernetes.io/projected/b8ee3340-dbf7-4080-90e8-d179aaebd42a-kube-api-access-m8wf8\") on node \"crc\" DevicePath \"\"" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.462651 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.462662 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8ee3340-dbf7-4080-90e8-d179aaebd42a-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.462677 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.462689 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.491976 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "b8ee3340-dbf7-4080-90e8-d179aaebd42a" (UID: "b8ee3340-dbf7-4080-90e8-d179aaebd42a"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.563793 4925 scope.go:117] "RemoveContainer" containerID="e890de960a13e545a06f122979112b8582565c7231f881918fc64c0688f6d862" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.564480 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b8ee3340-dbf7-4080-90e8-d179aaebd42a-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.796344 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.796980 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b8ee3340-dbf7-4080-90e8-d179aaebd42a","Type":"ContainerDied","Data":"6f5f3e05e8437eff910f5101f2f6d9076a3199b6b7d7249a116a1b56d61ba93c"} Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.797061 4925 scope.go:117] "RemoveContainer" containerID="31bf633a2d829c9e7056a2e2f6db7505030974dcf5cb195d8a7b40359aff3499" Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.838563 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.851688 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:26:11 crc kubenswrapper[4925]: I0121 11:26:11.853819 4925 scope.go:117] "RemoveContainer" containerID="af0133f221b82f8e8d7d1bfd4b72c8c8398b721b9d116ba44ded90d3c40886b5" Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.253800 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.441326 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djb64\" (UniqueName: \"kubernetes.io/projected/0de4d37c-e2ca-42a1-9db4-782daf24105f-kube-api-access-djb64\") pod \"0de4d37c-e2ca-42a1-9db4-782daf24105f\" (UID: \"0de4d37c-e2ca-42a1-9db4-782daf24105f\") " Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.441588 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0de4d37c-e2ca-42a1-9db4-782daf24105f-operator-scripts\") pod \"0de4d37c-e2ca-42a1-9db4-782daf24105f\" (UID: \"0de4d37c-e2ca-42a1-9db4-782daf24105f\") " Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.442511 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0de4d37c-e2ca-42a1-9db4-782daf24105f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0de4d37c-e2ca-42a1-9db4-782daf24105f" (UID: "0de4d37c-e2ca-42a1-9db4-782daf24105f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.459756 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0de4d37c-e2ca-42a1-9db4-782daf24105f-kube-api-access-djb64" (OuterVolumeSpecName: "kube-api-access-djb64") pod "0de4d37c-e2ca-42a1-9db4-782daf24105f" (UID: "0de4d37c-e2ca-42a1-9db4-782daf24105f"). InnerVolumeSpecName "kube-api-access-djb64". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.544368 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djb64\" (UniqueName: \"kubernetes.io/projected/0de4d37c-e2ca-42a1-9db4-782daf24105f-kube-api-access-djb64\") on node \"crc\" DevicePath \"\"" Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.544445 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0de4d37c-e2ca-42a1-9db4-782daf24105f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:26:12 crc kubenswrapper[4925]: E0121 11:26:12.581992 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="383d652ec75ba894d8751eebc2e366ffd642f6a1b5240320eb77ac0cf23e069a" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Jan 21 11:26:12 crc kubenswrapper[4925]: E0121 11:26:12.583940 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="383d652ec75ba894d8751eebc2e366ffd642f6a1b5240320eb77ac0cf23e069a" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Jan 21 11:26:12 crc kubenswrapper[4925]: E0121 11:26:12.585313 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="383d652ec75ba894d8751eebc2e366ffd642f6a1b5240320eb77ac0cf23e069a" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Jan 21 11:26:12 crc kubenswrapper[4925]: E0121 11:26:12.585364 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="992e02ca-a0a4-4b4d-befa-33204230b0d1" containerName="watcher-decision-engine" Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.811611 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" event={"ID":"0de4d37c-e2ca-42a1-9db4-782daf24105f","Type":"ContainerDied","Data":"7b8f3e72aafb6abe1e20352f9c9310f437d557121331d88d46e1df4dad855c5e"} Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.812006 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b8f3e72aafb6abe1e20352f9c9310f437d557121331d88d46e1df4dad855c5e" Jan 21 11:26:12 crc kubenswrapper[4925]: I0121 11:26:12.811666 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher9466-account-delete-4s6rs" Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.012926 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.013316 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="ceilometer-central-agent" containerID="cri-o://72a020f373b3fbe14979c6a545daba8ca789df6b9c42a9a86e5fb1c30670a47d" gracePeriod=30 Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.013414 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="proxy-httpd" containerID="cri-o://81ad7533bb89ecf78ccd3736ea8675d42e6fc92e79fd092b9383bfefd290c4e2" gracePeriod=30 Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.013422 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="ceilometer-notification-agent" containerID="cri-o://cc0375e0f1ae04167d986b8b109281d10dfb375710376a55b880c07fc5e7d321" gracePeriod=30 Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.013476 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="sg-core" containerID="cri-o://92f4dbaa73a9f2a272c09f85d5e444f540b2176bdb4cc9a18bf29642c7f6c8ee" gracePeriod=30 Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.513846 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" path="/var/lib/kubelet/pods/b8ee3340-dbf7-4080-90e8-d179aaebd42a/volumes" Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.826206 4925 generic.go:334] "Generic (PLEG): container finished" podID="f0dc5004-1da5-4397-a12a-57120943250e" containerID="81ad7533bb89ecf78ccd3736ea8675d42e6fc92e79fd092b9383bfefd290c4e2" exitCode=0 Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.826264 4925 generic.go:334] "Generic (PLEG): container finished" podID="f0dc5004-1da5-4397-a12a-57120943250e" containerID="92f4dbaa73a9f2a272c09f85d5e444f540b2176bdb4cc9a18bf29642c7f6c8ee" exitCode=2 Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.826276 4925 generic.go:334] "Generic (PLEG): container finished" podID="f0dc5004-1da5-4397-a12a-57120943250e" containerID="72a020f373b3fbe14979c6a545daba8ca789df6b9c42a9a86e5fb1c30670a47d" exitCode=0 Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.826277 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerDied","Data":"81ad7533bb89ecf78ccd3736ea8675d42e6fc92e79fd092b9383bfefd290c4e2"} Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.826330 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerDied","Data":"92f4dbaa73a9f2a272c09f85d5e444f540b2176bdb4cc9a18bf29642c7f6c8ee"} Jan 21 11:26:13 crc kubenswrapper[4925]: I0121 11:26:13.826341 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" 
event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerDied","Data":"72a020f373b3fbe14979c6a545daba8ca789df6b9c42a9a86e5fb1c30670a47d"} Jan 21 11:26:14 crc kubenswrapper[4925]: I0121 11:26:14.283500 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-dxvf6"] Jan 21 11:26:14 crc kubenswrapper[4925]: I0121 11:26:14.295586 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-dxvf6"] Jan 21 11:26:14 crc kubenswrapper[4925]: I0121 11:26:14.309553 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-9466-account-create-update-92wd2"] Jan 21 11:26:14 crc kubenswrapper[4925]: I0121 11:26:14.320303 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher9466-account-delete-4s6rs"] Jan 21 11:26:14 crc kubenswrapper[4925]: I0121 11:26:14.329089 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-9466-account-create-update-92wd2"] Jan 21 11:26:14 crc kubenswrapper[4925]: I0121 11:26:14.338139 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher9466-account-delete-4s6rs"] Jan 21 11:26:14 crc kubenswrapper[4925]: I0121 11:26:14.836045 4925 generic.go:334] "Generic (PLEG): container finished" podID="992e02ca-a0a4-4b4d-befa-33204230b0d1" containerID="383d652ec75ba894d8751eebc2e366ffd642f6a1b5240320eb77ac0cf23e069a" exitCode=0 Jan 21 11:26:14 crc kubenswrapper[4925]: I0121 11:26:14.836088 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"992e02ca-a0a4-4b4d-befa-33204230b0d1","Type":"ContainerDied","Data":"383d652ec75ba894d8751eebc2e366ffd642f6a1b5240320eb77ac0cf23e069a"} Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.518107 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0de4d37c-e2ca-42a1-9db4-782daf24105f" path="/var/lib/kubelet/pods/0de4d37c-e2ca-42a1-9db4-782daf24105f/volumes" Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.519646 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="389e6e04-e316-4ddc-99d0-9c04f661d6b5" path="/var/lib/kubelet/pods/389e6e04-e316-4ddc-99d0-9c04f661d6b5/volumes" Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.520769 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eef1032c-ab9e-4ac6-934f-f7544d835d3b" path="/var/lib/kubelet/pods/eef1032c-ab9e-4ac6-934f-f7544d835d3b/volumes" Jan 21 11:26:15 crc kubenswrapper[4925]: E0121 11:26:15.529486 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f is running failed: container process not found" containerID="32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:26:15 crc kubenswrapper[4925]: E0121 11:26:15.529836 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f is running failed: container process not found" containerID="32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:26:15 crc kubenswrapper[4925]: E0121 11:26:15.530099 4925 
log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f is running failed: container process not found" containerID="32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Jan 21 11:26:15 crc kubenswrapper[4925]: E0121 11:26:15.530136 4925 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f is running failed: container process not found" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="008045d1-ce08-49f5-b980-d62f4d3e96ba" containerName="watcher-applier"
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.591588 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.759892 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-combined-ca-bundle\") pod \"992e02ca-a0a4-4b4d-befa-33204230b0d1\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") "
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.760025 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-config-data\") pod \"992e02ca-a0a4-4b4d-befa-33204230b0d1\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") "
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.760066 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/992e02ca-a0a4-4b4d-befa-33204230b0d1-logs\") pod \"992e02ca-a0a4-4b4d-befa-33204230b0d1\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") "
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.760182 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8xvt\" (UniqueName: \"kubernetes.io/projected/992e02ca-a0a4-4b4d-befa-33204230b0d1-kube-api-access-j8xvt\") pod \"992e02ca-a0a4-4b4d-befa-33204230b0d1\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") "
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.760260 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-custom-prometheus-ca\") pod \"992e02ca-a0a4-4b4d-befa-33204230b0d1\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") "
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.760294 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-cert-memcached-mtls\") pod \"992e02ca-a0a4-4b4d-befa-33204230b0d1\" (UID: \"992e02ca-a0a4-4b4d-befa-33204230b0d1\") "
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.761514 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/992e02ca-a0a4-4b4d-befa-33204230b0d1-logs" (OuterVolumeSpecName: "logs") pod "992e02ca-a0a4-4b4d-befa-33204230b0d1" (UID: "992e02ca-a0a4-4b4d-befa-33204230b0d1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.776759 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/992e02ca-a0a4-4b4d-befa-33204230b0d1-kube-api-access-j8xvt" (OuterVolumeSpecName: "kube-api-access-j8xvt") pod "992e02ca-a0a4-4b4d-befa-33204230b0d1" (UID: "992e02ca-a0a4-4b4d-befa-33204230b0d1"). InnerVolumeSpecName "kube-api-access-j8xvt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.917542 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/992e02ca-a0a4-4b4d-befa-33204230b0d1-logs\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.917594 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8xvt\" (UniqueName: \"kubernetes.io/projected/992e02ca-a0a4-4b4d-befa-33204230b0d1-kube-api-access-j8xvt\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.928762 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "992e02ca-a0a4-4b4d-befa-33204230b0d1" (UID: "992e02ca-a0a4-4b4d-befa-33204230b0d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.931513 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "992e02ca-a0a4-4b4d-befa-33204230b0d1" (UID: "992e02ca-a0a4-4b4d-befa-33204230b0d1"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.955693 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-config-data" (OuterVolumeSpecName: "config-data") pod "992e02ca-a0a4-4b4d-befa-33204230b0d1" (UID: "992e02ca-a0a4-4b4d-befa-33204230b0d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.956729 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"992e02ca-a0a4-4b4d-befa-33204230b0d1","Type":"ContainerDied","Data":"c35f1d876671bd0cf7a8f8fae5fb803dc59f4a74f48f8a55b69cbc475d08a9dc"}
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.956795 4925 scope.go:117] "RemoveContainer" containerID="383d652ec75ba894d8751eebc2e366ffd642f6a1b5240320eb77ac0cf23e069a"
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.956985 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0"
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.961930 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "992e02ca-a0a4-4b4d-befa-33204230b0d1" (UID: "992e02ca-a0a4-4b4d-befa-33204230b0d1"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.962125 4925 generic.go:334] "Generic (PLEG): container finished" podID="008045d1-ce08-49f5-b980-d62f4d3e96ba" containerID="32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" exitCode=0
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.962254 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"008045d1-ce08-49f5-b980-d62f4d3e96ba","Type":"ContainerDied","Data":"32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f"}
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.962363 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"008045d1-ce08-49f5-b980-d62f4d3e96ba","Type":"ContainerDied","Data":"505767ddbb4ec0c6a76b25fe84c65663329f584a2af9004af660a17d6cb17215"}
Jan 21 11:26:15 crc kubenswrapper[4925]: I0121 11:26:15.962527 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="505767ddbb4ec0c6a76b25fe84c65663329f584a2af9004af660a17d6cb17215"
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.019906 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.019957 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-cert-memcached-mtls\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.019970 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.019980 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/992e02ca-a0a4-4b4d-befa-33204230b0d1-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.036233 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.224037 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-config-data\") pod \"008045d1-ce08-49f5-b980-d62f4d3e96ba\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") "
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.224128 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-combined-ca-bundle\") pod \"008045d1-ce08-49f5-b980-d62f4d3e96ba\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") "
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.224417 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ldqz\" (UniqueName: \"kubernetes.io/projected/008045d1-ce08-49f5-b980-d62f4d3e96ba-kube-api-access-2ldqz\") pod \"008045d1-ce08-49f5-b980-d62f4d3e96ba\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") "
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.224457 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-cert-memcached-mtls\") pod \"008045d1-ce08-49f5-b980-d62f4d3e96ba\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") "
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.224500 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008045d1-ce08-49f5-b980-d62f4d3e96ba-logs\") pod \"008045d1-ce08-49f5-b980-d62f4d3e96ba\" (UID: \"008045d1-ce08-49f5-b980-d62f4d3e96ba\") "
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.225238 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/008045d1-ce08-49f5-b980-d62f4d3e96ba-logs" (OuterVolumeSpecName: "logs") pod "008045d1-ce08-49f5-b980-d62f4d3e96ba" (UID: "008045d1-ce08-49f5-b980-d62f4d3e96ba"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.237837 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/008045d1-ce08-49f5-b980-d62f4d3e96ba-kube-api-access-2ldqz" (OuterVolumeSpecName: "kube-api-access-2ldqz") pod "008045d1-ce08-49f5-b980-d62f4d3e96ba" (UID: "008045d1-ce08-49f5-b980-d62f4d3e96ba"). InnerVolumeSpecName "kube-api-access-2ldqz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.321667 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-config-data" (OuterVolumeSpecName: "config-data") pod "008045d1-ce08-49f5-b980-d62f4d3e96ba" (UID: "008045d1-ce08-49f5-b980-d62f4d3e96ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.329008 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ldqz\" (UniqueName: \"kubernetes.io/projected/008045d1-ce08-49f5-b980-d62f4d3e96ba-kube-api-access-2ldqz\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.329042 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008045d1-ce08-49f5-b980-d62f4d3e96ba-logs\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.329055 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.380617 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "008045d1-ce08-49f5-b980-d62f4d3e96ba" (UID: "008045d1-ce08-49f5-b980-d62f4d3e96ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.382609 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "008045d1-ce08-49f5-b980-d62f4d3e96ba" (UID: "008045d1-ce08-49f5-b980-d62f4d3e96ba"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.411624 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.419180 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"]
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.430150 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-cert-memcached-mtls\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.430187 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008045d1-ce08-49f5-b980-d62f4d3e96ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.980065 4925 generic.go:334] "Generic (PLEG): container finished" podID="f0dc5004-1da5-4397-a12a-57120943250e" containerID="cc0375e0f1ae04167d986b8b109281d10dfb375710376a55b880c07fc5e7d321" exitCode=0
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.980175 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerDied","Data":"cc0375e0f1ae04167d986b8b109281d10dfb375710376a55b880c07fc5e7d321"}
Jan 21 11:26:16 crc kubenswrapper[4925]: I0121 11:26:16.981811 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0"
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.020656 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.030273 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"]
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.447295 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.506671 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf8c6\" (UniqueName: \"kubernetes.io/projected/f0dc5004-1da5-4397-a12a-57120943250e-kube-api-access-bf8c6\") pod \"f0dc5004-1da5-4397-a12a-57120943250e\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") "
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.506729 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-ceilometer-tls-certs\") pod \"f0dc5004-1da5-4397-a12a-57120943250e\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") "
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.506794 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-scripts\") pod \"f0dc5004-1da5-4397-a12a-57120943250e\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") "
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.506814 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-sg-core-conf-yaml\") pod \"f0dc5004-1da5-4397-a12a-57120943250e\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") "
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.506849 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-run-httpd\") pod \"f0dc5004-1da5-4397-a12a-57120943250e\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") "
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.506914 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-combined-ca-bundle\") pod \"f0dc5004-1da5-4397-a12a-57120943250e\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") "
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.506963 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-log-httpd\") pod \"f0dc5004-1da5-4397-a12a-57120943250e\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") "
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.506992 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-config-data\") pod \"f0dc5004-1da5-4397-a12a-57120943250e\" (UID: \"f0dc5004-1da5-4397-a12a-57120943250e\") "
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.510846 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f0dc5004-1da5-4397-a12a-57120943250e" (UID: "f0dc5004-1da5-4397-a12a-57120943250e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.515054 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f0dc5004-1da5-4397-a12a-57120943250e" (UID: "f0dc5004-1da5-4397-a12a-57120943250e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.516980 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0dc5004-1da5-4397-a12a-57120943250e-kube-api-access-bf8c6" (OuterVolumeSpecName: "kube-api-access-bf8c6") pod "f0dc5004-1da5-4397-a12a-57120943250e" (UID: "f0dc5004-1da5-4397-a12a-57120943250e"). InnerVolumeSpecName "kube-api-access-bf8c6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.517202 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-scripts" (OuterVolumeSpecName: "scripts") pod "f0dc5004-1da5-4397-a12a-57120943250e" (UID: "f0dc5004-1da5-4397-a12a-57120943250e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.519926 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="008045d1-ce08-49f5-b980-d62f4d3e96ba" path="/var/lib/kubelet/pods/008045d1-ce08-49f5-b980-d62f4d3e96ba/volumes"
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.520838 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="992e02ca-a0a4-4b4d-befa-33204230b0d1" path="/var/lib/kubelet/pods/992e02ca-a0a4-4b4d-befa-33204230b0d1/volumes"
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.541535 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f0dc5004-1da5-4397-a12a-57120943250e" (UID: "f0dc5004-1da5-4397-a12a-57120943250e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.565682 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f0dc5004-1da5-4397-a12a-57120943250e" (UID: "f0dc5004-1da5-4397-a12a-57120943250e"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.624263 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f0dc5004-1da5-4397-a12a-57120943250e" (UID: "f0dc5004-1da5-4397-a12a-57120943250e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.630919 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.630969 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf8c6\" (UniqueName: \"kubernetes.io/projected/f0dc5004-1da5-4397-a12a-57120943250e-kube-api-access-bf8c6\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.630984 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.630997 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.631014 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.631026 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0dc5004-1da5-4397-a12a-57120943250e-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.631041 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.654577 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-config-data" (OuterVolumeSpecName: "config-data") pod "f0dc5004-1da5-4397-a12a-57120943250e" (UID: "f0dc5004-1da5-4397-a12a-57120943250e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.732267 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0dc5004-1da5-4397-a12a-57120943250e-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.995071 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f0dc5004-1da5-4397-a12a-57120943250e","Type":"ContainerDied","Data":"cae1af74f635b2490a4a5226b82615add1f9ede8c39645653d4ba2e4becffbc8"}
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.995150 4925 scope.go:117] "RemoveContainer" containerID="81ad7533bb89ecf78ccd3736ea8675d42e6fc92e79fd092b9383bfefd290c4e2"
Jan 21 11:26:17 crc kubenswrapper[4925]: I0121 11:26:17.995192 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.094692 4925 scope.go:117] "RemoveContainer" containerID="92f4dbaa73a9f2a272c09f85d5e444f540b2176bdb4cc9a18bf29642c7f6c8ee"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.102438 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.109649 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.126197 4925 scope.go:117] "RemoveContainer" containerID="cc0375e0f1ae04167d986b8b109281d10dfb375710376a55b880c07fc5e7d321"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.133581 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.134291 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="ceilometer-notification-agent"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.134380 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="ceilometer-notification-agent"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.134477 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de4d37c-e2ca-42a1-9db4-782daf24105f" containerName="mariadb-account-delete"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.134536 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de4d37c-e2ca-42a1-9db4-782daf24105f" containerName="mariadb-account-delete"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.134602 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-kuttl-api-log"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.134662 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-kuttl-api-log"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.134739 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-api"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.134803 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-api"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.134886 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="sg-core"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.134951 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="sg-core"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.135013 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="008045d1-ce08-49f5-b980-d62f4d3e96ba" containerName="watcher-applier"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.135075 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="008045d1-ce08-49f5-b980-d62f4d3e96ba" containerName="watcher-applier"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.135141 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="proxy-httpd"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.135219 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="proxy-httpd"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.135294 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="992e02ca-a0a4-4b4d-befa-33204230b0d1" containerName="watcher-decision-engine"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.135351 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="992e02ca-a0a4-4b4d-befa-33204230b0d1" containerName="watcher-decision-engine"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.135456 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="ceilometer-central-agent"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.135527 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="ceilometer-central-agent"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.135749 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="008045d1-ce08-49f5-b980-d62f4d3e96ba" containerName="watcher-applier"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.135842 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="proxy-httpd"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.135945 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="ceilometer-central-agent"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.136042 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="992e02ca-a0a4-4b4d-befa-33204230b0d1" containerName="watcher-decision-engine"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.136155 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="sg-core"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.136226 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-api"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.136301 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8ee3340-dbf7-4080-90e8-d179aaebd42a" containerName="watcher-kuttl-api-log"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.136380 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0dc5004-1da5-4397-a12a-57120943250e" containerName="ceilometer-notification-agent"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.136464 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de4d37c-e2ca-42a1-9db4-782daf24105f" containerName="mariadb-account-delete"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.140093 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.146494 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.147069 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.147798 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.154578 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.169228 4925 scope.go:117] "RemoveContainer" containerID="72a020f373b3fbe14979c6a545daba8ca789df6b9c42a9a86e5fb1c30670a47d"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.264815 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-scripts\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.264868 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.264889 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-config-data\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.264912 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.265037 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-log-httpd\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.265258 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-run-httpd\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.265343 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.265501 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqqxn\" (UniqueName: \"kubernetes.io/projected/1019a31b-2bb6-4e0b-bf17-950052095e18-kube-api-access-cqqxn\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.290538 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-8pgtn"]
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.292507 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.296922 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"]
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.298072 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.299652 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.308338 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-8pgtn"]
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.319477 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"]
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.366929 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.367060 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqqxn\" (UniqueName: \"kubernetes.io/projected/1019a31b-2bb6-4e0b-bf17-950052095e18-kube-api-access-cqqxn\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.367123 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-scripts\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.367152 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.367173 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-config-data\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.367219 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.367257 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-log-httpd\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.367337 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-run-httpd\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.367874 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-log-httpd\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.368005 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-run-httpd\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.375790 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.376742 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-scripts\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.377646 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.380641 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-config-data\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.398552 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.400913 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqqxn\" (UniqueName: \"kubernetes.io/projected/1019a31b-2bb6-4e0b-bf17-950052095e18-kube-api-access-cqqxn\") pod \"ceilometer-0\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.463620 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.468657 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-operator-scripts\") pod \"watcher-db-create-8pgtn\" (UID: \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\") " pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.468825 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qqtn\" (UniqueName: \"kubernetes.io/projected/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-kube-api-access-9qqtn\") pod \"watcher-db-create-8pgtn\" (UID: \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\") " pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.469068 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdhjs\" (UniqueName: \"kubernetes.io/projected/996cd8c6-1a8f-491b-a556-3c645f3b94d1-kube-api-access-qdhjs\") pod \"watcher-9a3f-account-create-update-w297x\" (UID: \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\") " pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.469142 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/996cd8c6-1a8f-491b-a556-3c645f3b94d1-operator-scripts\") pod \"watcher-9a3f-account-create-update-w297x\" (UID: \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\") " pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.502629 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73"
Jan 21 11:26:18 crc kubenswrapper[4925]: E0121 11:26:18.503144 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.571356 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdhjs\" (UniqueName: \"kubernetes.io/projected/996cd8c6-1a8f-491b-a556-3c645f3b94d1-kube-api-access-qdhjs\") pod \"watcher-9a3f-account-create-update-w297x\" (UID: \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\") " pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.571470 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/996cd8c6-1a8f-491b-a556-3c645f3b94d1-operator-scripts\") pod \"watcher-9a3f-account-create-update-w297x\" (UID: \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\") " pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.571798 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-operator-scripts\") pod \"watcher-db-create-8pgtn\" (UID: \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\") " pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.571858 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qqtn\" (UniqueName: \"kubernetes.io/projected/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-kube-api-access-9qqtn\") pod \"watcher-db-create-8pgtn\" (UID: \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\") " pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.572968 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-operator-scripts\") pod \"watcher-db-create-8pgtn\" (UID: \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\") " pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.572982 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/996cd8c6-1a8f-491b-a556-3c645f3b94d1-operator-scripts\") pod \"watcher-9a3f-account-create-update-w297x\" (UID: \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\") " pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.600331 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdhjs\" (UniqueName: \"kubernetes.io/projected/996cd8c6-1a8f-491b-a556-3c645f3b94d1-kube-api-access-qdhjs\") pod \"watcher-9a3f-account-create-update-w297x\" (UID: \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\") " pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.607080 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qqtn\" (UniqueName: \"kubernetes.io/projected/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-kube-api-access-9qqtn\") pod \"watcher-db-create-8pgtn\" (UID: \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\") " pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.609996 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:18 crc kubenswrapper[4925]: I0121 11:26:18.620537 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:19 crc kubenswrapper[4925]: I0121 11:26:19.135338 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"]
Jan 21 11:26:19 crc kubenswrapper[4925]: I0121 11:26:19.350603 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"]
Jan 21 11:26:19 crc kubenswrapper[4925]: W0121 11:26:19.365908 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod996cd8c6_1a8f_491b_a556_3c645f3b94d1.slice/crio-ddcacff2a7ecb0ce1d942ac5dd6f24c86a1cec9916dafb9d093dd96a587e079d WatchSource:0}: Error finding container ddcacff2a7ecb0ce1d942ac5dd6f24c86a1cec9916dafb9d093dd96a587e079d: Status 404 returned error can't find the container with id ddcacff2a7ecb0ce1d942ac5dd6f24c86a1cec9916dafb9d093dd96a587e079d
Jan 21 11:26:19 crc kubenswrapper[4925]: I0121 11:26:19.668538 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0dc5004-1da5-4397-a12a-57120943250e" path="/var/lib/kubelet/pods/f0dc5004-1da5-4397-a12a-57120943250e/volumes"
Jan 21 11:26:19 crc kubenswrapper[4925]: I0121 11:26:19.669776 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-8pgtn"]
Jan 21 11:26:20 crc kubenswrapper[4925]: I0121 11:26:20.025751 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-8pgtn" event={"ID":"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1","Type":"ContainerStarted","Data":"81e795a5e6260a16dcc31fa2870a6eb6c3a88eed42be3038da4b24fbb2b4fc3a"}
Jan 21 11:26:20 crc kubenswrapper[4925]: I0121 11:26:20.028745 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x" event={"ID":"996cd8c6-1a8f-491b-a556-3c645f3b94d1","Type":"ContainerStarted","Data":"ddcacff2a7ecb0ce1d942ac5dd6f24c86a1cec9916dafb9d093dd96a587e079d"}
Jan 21 11:26:20 crc kubenswrapper[4925]: I0121 11:26:20.030259 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerStarted","Data":"f14c740cfb34a91f57ab94dfd22ddd7d89b74a3a412dcafbb09291b36caf3402"}
Jan 21 11:26:21 crc kubenswrapper[4925]: I0121 11:26:21.041735 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerStarted","Data":"5ed03fbb64eb1b3089e897600e49432c63a04d574844ba1fe896bffd0dbfa385"}
Jan 21 11:26:21 crc kubenswrapper[4925]: I0121 11:26:21.047137 4925 generic.go:334] "Generic (PLEG): container finished" podID="c898cc46-eec6-42b0-a5a0-73ae13b0d2b1" containerID="e060760729d1d90f3b8bfec85c18c8e9d92dfa37150de6540dd2bb87ca2cd627" exitCode=0
Jan 21 11:26:21 crc kubenswrapper[4925]: I0121 11:26:21.047272 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-8pgtn" event={"ID":"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1","Type":"ContainerDied","Data":"e060760729d1d90f3b8bfec85c18c8e9d92dfa37150de6540dd2bb87ca2cd627"}
Jan 21 11:26:21 crc kubenswrapper[4925]: I0121 11:26:21.049481 4925 generic.go:334] "Generic (PLEG): container finished" podID="996cd8c6-1a8f-491b-a556-3c645f3b94d1" containerID="389a7210a2a6ab5ba90a1a2c0e1a88de2a7ec3eca48bcaf1bd962604589e5bbf" exitCode=0
Jan 21 11:26:21 crc kubenswrapper[4925]: I0121 11:26:21.049527 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x" event={"ID":"996cd8c6-1a8f-491b-a556-3c645f3b94d1","Type":"ContainerDied","Data":"389a7210a2a6ab5ba90a1a2c0e1a88de2a7ec3eca48bcaf1bd962604589e5bbf"}
Jan 21 11:26:22 crc kubenswrapper[4925]: I0121 11:26:22.061656 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerStarted","Data":"5a931a602f84dec188a493ab23928a2e795a0293644f8ac20e3a9b9a0457dfbe"}
Jan 21 11:26:22 crc kubenswrapper[4925]: I0121 11:26:22.969802 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.016265 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-operator-scripts\") pod \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\" (UID: \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\") "
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.016344 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qqtn\" (UniqueName: \"kubernetes.io/projected/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-kube-api-access-9qqtn\") pod \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\" (UID: \"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1\") "
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.019223 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c898cc46-eec6-42b0-a5a0-73ae13b0d2b1" (UID: "c898cc46-eec6-42b0-a5a0-73ae13b0d2b1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.040745 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-kube-api-access-9qqtn" (OuterVolumeSpecName: "kube-api-access-9qqtn") pod "c898cc46-eec6-42b0-a5a0-73ae13b0d2b1" (UID: "c898cc46-eec6-42b0-a5a0-73ae13b0d2b1"). InnerVolumeSpecName "kube-api-access-9qqtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.044378 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.085258 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x" event={"ID":"996cd8c6-1a8f-491b-a556-3c645f3b94d1","Type":"ContainerDied","Data":"ddcacff2a7ecb0ce1d942ac5dd6f24c86a1cec9916dafb9d093dd96a587e079d"}
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.085311 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddcacff2a7ecb0ce1d942ac5dd6f24c86a1cec9916dafb9d093dd96a587e079d"
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.085384 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.088662 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerStarted","Data":"108d090570a3f84e078afe94a503335e8d287d0cef29aefefb0ebf280802d9fd"}
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.092855 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-8pgtn" event={"ID":"c898cc46-eec6-42b0-a5a0-73ae13b0d2b1","Type":"ContainerDied","Data":"81e795a5e6260a16dcc31fa2870a6eb6c3a88eed42be3038da4b24fbb2b4fc3a"}
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.092903 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81e795a5e6260a16dcc31fa2870a6eb6c3a88eed42be3038da4b24fbb2b4fc3a"
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.092976 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-8pgtn"
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.118122 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdhjs\" (UniqueName: \"kubernetes.io/projected/996cd8c6-1a8f-491b-a556-3c645f3b94d1-kube-api-access-qdhjs\") pod \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\" (UID: \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\") "
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.118216 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/996cd8c6-1a8f-491b-a556-3c645f3b94d1-operator-scripts\") pod \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\" (UID: \"996cd8c6-1a8f-491b-a556-3c645f3b94d1\") "
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.118862 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.118961 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qqtn\" (UniqueName: \"kubernetes.io/projected/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1-kube-api-access-9qqtn\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.119266 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/996cd8c6-1a8f-491b-a556-3c645f3b94d1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "996cd8c6-1a8f-491b-a556-3c645f3b94d1" (UID: "996cd8c6-1a8f-491b-a556-3c645f3b94d1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.122731 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/996cd8c6-1a8f-491b-a556-3c645f3b94d1-kube-api-access-qdhjs" (OuterVolumeSpecName: "kube-api-access-qdhjs") pod "996cd8c6-1a8f-491b-a556-3c645f3b94d1" (UID: "996cd8c6-1a8f-491b-a556-3c645f3b94d1"). InnerVolumeSpecName "kube-api-access-qdhjs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.220647 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdhjs\" (UniqueName: \"kubernetes.io/projected/996cd8c6-1a8f-491b-a556-3c645f3b94d1-kube-api-access-qdhjs\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:23 crc kubenswrapper[4925]: I0121 11:26:23.220705 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/996cd8c6-1a8f-491b-a556-3c645f3b94d1-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 11:26:24 crc kubenswrapper[4925]: I0121 11:26:24.106495 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerStarted","Data":"32d76203f4ad83584d070c8654ebdd08cce9cb4596bdf4833a018e3ccbcf3ca9"}
Jan 21 11:26:24 crc kubenswrapper[4925]: I0121 11:26:24.106791 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:24 crc kubenswrapper[4925]: I0121 11:26:24.138251 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.652642251 podStartE2EDuration="6.138215954s" podCreationTimestamp="2026-01-21 11:26:18 +0000 UTC" firstStartedPulling="2026-01-21 11:26:19.136621901 +0000 UTC m=+1870.740513835" lastFinishedPulling="2026-01-21 11:26:23.622195604 +0000 UTC m=+1875.226087538" observedRunningTime="2026-01-21 11:26:24.133791794 +0000 UTC m=+1875.737683728" watchObservedRunningTime="2026-01-21 11:26:24.138215954 +0000 UTC m=+1875.742107898"
Jan 21 11:26:32 crc kubenswrapper[4925]: I0121 11:26:32.502479 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73"
Jan 21 11:26:32 crc kubenswrapper[4925]: E0121 11:26:32.503607 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 11:26:46 crc kubenswrapper[4925]: I0121 11:26:46.502184 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73"
Jan 21 11:26:46 crc kubenswrapper[4925]: E0121 11:26:46.504150 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 11:26:48 crc kubenswrapper[4925]: I0121 11:26:48.476118 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:26:57 crc kubenswrapper[4925]: I0121 11:26:57.501620 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73"
Jan 21 11:26:57 crc kubenswrapper[4925]: E0121 11:26:57.502596 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:27:09 crc kubenswrapper[4925]: I0121 11:27:09.509367 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:27:09 crc kubenswrapper[4925]: E0121 11:27:09.510321 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:27:11 crc kubenswrapper[4925]: I0121 11:27:11.795230 4925 scope.go:117] "RemoveContainer" containerID="86a16f3ff2d07884a507606696ce02015881f320482be8d52f82127545582bfa" Jan 21 11:27:21 crc kubenswrapper[4925]: I0121 11:27:21.502475 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:27:21 crc kubenswrapper[4925]: E0121 11:27:21.503461 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:27:35 crc kubenswrapper[4925]: I0121 11:27:35.502936 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:27:35 crc kubenswrapper[4925]: E0121 11:27:35.504342 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:27:49 crc kubenswrapper[4925]: I0121 11:27:49.508979 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:27:49 crc kubenswrapper[4925]: E0121 11:27:49.509589 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:28:02 crc kubenswrapper[4925]: I0121 11:28:02.501607 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:28:02 crc kubenswrapper[4925]: E0121 11:28:02.502301 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:28:11 crc kubenswrapper[4925]: I0121 11:28:11.944464 4925 scope.go:117] "RemoveContainer" containerID="9cbd272345e52a2e9c3ecffa77cd1497804997c6b1e19e1a1d049859cd0a2308" Jan 21 11:28:11 crc kubenswrapper[4925]: I0121 11:28:11.989543 4925 scope.go:117] "RemoveContainer" containerID="5aaa4ff8321e9b754a751cd0dee72534902cc36e45dd13c02859591cba089159" Jan 21 11:28:12 crc kubenswrapper[4925]: I0121 11:28:12.027704 4925 scope.go:117] "RemoveContainer" containerID="a26b6af523be5c6783095117bd8aaf9237989807a026f8ab07b6d3d8d781857e" Jan 21 11:28:12 crc kubenswrapper[4925]: I0121 11:28:12.048216 4925 scope.go:117] "RemoveContainer" containerID="2586edbc6114d143017f7c5a549a1edc7a7107ffb92de81c74424119b8fea061" Jan 21 11:28:12 crc kubenswrapper[4925]: I0121 11:28:12.090968 4925 scope.go:117] "RemoveContainer" containerID="e09d24ecd59c23c019bfbe66b298fa3c89d87fa284a9d00016af7f3b48361e8b" Jan 21 11:28:14 crc kubenswrapper[4925]: I0121 11:28:14.503334 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:28:14 crc kubenswrapper[4925]: E0121 11:28:14.504022 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:28:29 crc kubenswrapper[4925]: I0121 11:28:29.509232 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:28:29 crc kubenswrapper[4925]: E0121 11:28:29.510610 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:28:40 crc kubenswrapper[4925]: I0121 11:28:40.501849 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:28:40 crc kubenswrapper[4925]: E0121 11:28:40.502705 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:28:51 crc kubenswrapper[4925]: I0121 11:28:51.502217 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:28:51 crc kubenswrapper[4925]: E0121 11:28:51.503028 4925 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:29:02 crc kubenswrapper[4925]: I0121 11:29:02.501663 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:29:02 crc kubenswrapper[4925]: E0121 11:29:02.502569 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:29:12 crc kubenswrapper[4925]: I0121 11:29:12.178169 4925 scope.go:117] "RemoveContainer" containerID="406322ec23c25f5353fb536582332bf28b383f3958044207ddeeec43e03be3ee" Jan 21 11:29:12 crc kubenswrapper[4925]: I0121 11:29:12.285206 4925 scope.go:117] "RemoveContainer" containerID="1457d1c02449f1eecdfa9afa3541f9fe55c02e3bd06e080f2b8d421a73dabaf6" Jan 21 11:29:12 crc kubenswrapper[4925]: I0121 11:29:12.338746 4925 scope.go:117] "RemoveContainer" containerID="9ca41a9bed7c069da2e84f8de9cb4942c4c3e2eaaa352437ea8573ded9caa68e" Jan 21 11:29:12 crc kubenswrapper[4925]: I0121 11:29:12.407051 4925 scope.go:117] "RemoveContainer" containerID="6cd55a5f5399cc8bab7fa9585f858c36db33eafc98d1b058a3d8425fed4a13b6" Jan 21 11:29:12 crc kubenswrapper[4925]: I0121 11:29:12.432663 4925 scope.go:117] "RemoveContainer" containerID="fb5d956f3ade8c5da24664b0feab7ccd5bd4a9346b68fea3a684132b2ff7907e" Jan 21 11:29:15 crc kubenswrapper[4925]: I0121 11:29:15.502113 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:29:15 crc kubenswrapper[4925]: E0121 11:29:15.502686 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:29:22 crc kubenswrapper[4925]: I0121 11:29:22.062146 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/root-account-create-update-d8bm7"] Jan 21 11:29:22 crc kubenswrapper[4925]: I0121 11:29:22.071662 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn"] Jan 21 11:29:22 crc kubenswrapper[4925]: I0121 11:29:22.082526 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/root-account-create-update-d8bm7"] Jan 21 11:29:22 crc kubenswrapper[4925]: I0121 11:29:22.090933 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-7dc6-account-create-update-7k5hn"] Jan 21 11:29:22 crc kubenswrapper[4925]: I0121 11:29:22.098596 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-db-create-rjq9w"] Jan 21 11:29:22 
crc kubenswrapper[4925]: I0121 11:29:22.107361 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-db-create-rjq9w"] Jan 21 11:29:23 crc kubenswrapper[4925]: I0121 11:29:23.513099 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23a3b777-9ddf-4df2-842a-b9e29e1b7aa0" path="/var/lib/kubelet/pods/23a3b777-9ddf-4df2-842a-b9e29e1b7aa0/volumes" Jan 21 11:29:23 crc kubenswrapper[4925]: I0121 11:29:23.514375 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46a8fc8b-5efe-47ee-800e-fd7372a3bc4b" path="/var/lib/kubelet/pods/46a8fc8b-5efe-47ee-800e-fd7372a3bc4b/volumes" Jan 21 11:29:23 crc kubenswrapper[4925]: I0121 11:29:23.515262 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6426427a-7aeb-4bf8-8850-8dd1fbf82adc" path="/var/lib/kubelet/pods/6426427a-7aeb-4bf8-8850-8dd1fbf82adc/volumes" Jan 21 11:29:27 crc kubenswrapper[4925]: I0121 11:29:27.502130 4925 scope.go:117] "RemoveContainer" containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:29:28 crc kubenswrapper[4925]: I0121 11:29:28.821476 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"14144a36600bd7d5b9a71777ea7bcad1b2af7e52667e89f48ae846cc78fbbc2d"} Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.218819 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9"] Jan 21 11:30:00 crc kubenswrapper[4925]: E0121 11:30:00.219989 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="996cd8c6-1a8f-491b-a556-3c645f3b94d1" containerName="mariadb-account-create-update" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.220011 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="996cd8c6-1a8f-491b-a556-3c645f3b94d1" containerName="mariadb-account-create-update" Jan 21 11:30:00 crc kubenswrapper[4925]: E0121 11:30:00.220033 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c898cc46-eec6-42b0-a5a0-73ae13b0d2b1" containerName="mariadb-database-create" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.220052 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c898cc46-eec6-42b0-a5a0-73ae13b0d2b1" containerName="mariadb-database-create" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.220292 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="996cd8c6-1a8f-491b-a556-3c645f3b94d1" containerName="mariadb-account-create-update" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.220331 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c898cc46-eec6-42b0-a5a0-73ae13b0d2b1" containerName="mariadb-database-create" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.221416 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.226419 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.228863 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.231366 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9"] Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.357868 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5195f4a-967f-42f5-9402-9d4473fb49c5-secret-volume\") pod \"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.358152 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5195f4a-967f-42f5-9402-9d4473fb49c5-config-volume\") pod \"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.358524 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qggnz\" (UniqueName: \"kubernetes.io/projected/e5195f4a-967f-42f5-9402-9d4473fb49c5-kube-api-access-qggnz\") pod \"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.460083 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5195f4a-967f-42f5-9402-9d4473fb49c5-secret-volume\") pod \"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.460215 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5195f4a-967f-42f5-9402-9d4473fb49c5-config-volume\") pod \"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.460339 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qggnz\" (UniqueName: \"kubernetes.io/projected/e5195f4a-967f-42f5-9402-9d4473fb49c5-kube-api-access-qggnz\") pod \"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.461937 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5195f4a-967f-42f5-9402-9d4473fb49c5-config-volume\") pod 
\"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.469562 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5195f4a-967f-42f5-9402-9d4473fb49c5-secret-volume\") pod \"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.480311 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qggnz\" (UniqueName: \"kubernetes.io/projected/e5195f4a-967f-42f5-9402-9d4473fb49c5-kube-api-access-qggnz\") pod \"collect-profiles-29483250-dhdz9\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:00 crc kubenswrapper[4925]: I0121 11:30:00.546103 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:01 crc kubenswrapper[4925]: I0121 11:30:01.166281 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9"] Jan 21 11:30:01 crc kubenswrapper[4925]: I0121 11:30:01.488658 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" event={"ID":"e5195f4a-967f-42f5-9402-9d4473fb49c5","Type":"ContainerStarted","Data":"4c5ea9675562eadefa8f4db8bcf51d9034008d394324a3b966e93bd838d0d089"} Jan 21 11:30:02 crc kubenswrapper[4925]: I0121 11:30:02.499588 4925 generic.go:334] "Generic (PLEG): container finished" podID="e5195f4a-967f-42f5-9402-9d4473fb49c5" containerID="8b13216693667775f7b24ef39973fad9d1b6f53a54e22559bbe0f6771524b418" exitCode=0 Jan 21 11:30:02 crc kubenswrapper[4925]: I0121 11:30:02.499672 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" event={"ID":"e5195f4a-967f-42f5-9402-9d4473fb49c5","Type":"ContainerDied","Data":"8b13216693667775f7b24ef39973fad9d1b6f53a54e22559bbe0f6771524b418"} Jan 21 11:30:03 crc kubenswrapper[4925]: I0121 11:30:03.832496 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.000748 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5195f4a-967f-42f5-9402-9d4473fb49c5-config-volume\") pod \"e5195f4a-967f-42f5-9402-9d4473fb49c5\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.000857 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5195f4a-967f-42f5-9402-9d4473fb49c5-secret-volume\") pod \"e5195f4a-967f-42f5-9402-9d4473fb49c5\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.001039 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qggnz\" (UniqueName: \"kubernetes.io/projected/e5195f4a-967f-42f5-9402-9d4473fb49c5-kube-api-access-qggnz\") pod \"e5195f4a-967f-42f5-9402-9d4473fb49c5\" (UID: \"e5195f4a-967f-42f5-9402-9d4473fb49c5\") " Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.001768 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5195f4a-967f-42f5-9402-9d4473fb49c5-config-volume" (OuterVolumeSpecName: "config-volume") pod "e5195f4a-967f-42f5-9402-9d4473fb49c5" (UID: "e5195f4a-967f-42f5-9402-9d4473fb49c5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.007175 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5195f4a-967f-42f5-9402-9d4473fb49c5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e5195f4a-967f-42f5-9402-9d4473fb49c5" (UID: "e5195f4a-967f-42f5-9402-9d4473fb49c5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.018516 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5195f4a-967f-42f5-9402-9d4473fb49c5-kube-api-access-qggnz" (OuterVolumeSpecName: "kube-api-access-qggnz") pod "e5195f4a-967f-42f5-9402-9d4473fb49c5" (UID: "e5195f4a-967f-42f5-9402-9d4473fb49c5"). InnerVolumeSpecName "kube-api-access-qggnz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.102995 4925 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e5195f4a-967f-42f5-9402-9d4473fb49c5-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.103562 4925 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e5195f4a-967f-42f5-9402-9d4473fb49c5-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.103594 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qggnz\" (UniqueName: \"kubernetes.io/projected/e5195f4a-967f-42f5-9402-9d4473fb49c5-kube-api-access-qggnz\") on node \"crc\" DevicePath \"\"" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.524476 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" event={"ID":"e5195f4a-967f-42f5-9402-9d4473fb49c5","Type":"ContainerDied","Data":"4c5ea9675562eadefa8f4db8bcf51d9034008d394324a3b966e93bd838d0d089"} Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.524957 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c5ea9675562eadefa8f4db8bcf51d9034008d394324a3b966e93bd838d0d089" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.524867 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9" Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.940717 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l"] Jan 21 11:30:04 crc kubenswrapper[4925]: I0121 11:30:04.955044 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483205-hjb8l"] Jan 21 11:30:05 crc kubenswrapper[4925]: I0121 11:30:05.561491 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee785c06-3ec0-4917-a762-a5a8c178b95a" path="/var/lib/kubelet/pods/ee785c06-3ec0-4917-a762-a5a8c178b95a/volumes" Jan 21 11:30:10 crc kubenswrapper[4925]: I0121 11:30:10.036111 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-db-sync-jnj2q"] Jan 21 11:30:10 crc kubenswrapper[4925]: I0121 11:30:10.045965 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-db-sync-jnj2q"] Jan 21 11:30:11 crc kubenswrapper[4925]: I0121 11:30:11.514078 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82ae5abc-2167-42bc-9613-27710d083439" path="/var/lib/kubelet/pods/82ae5abc-2167-42bc-9613-27710d083439/volumes" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.613439 4925 scope.go:117] "RemoveContainer" containerID="13f4a0ae49b8c0a72a8d5c2931975781ef05f02fed019d43463e851e64cc0acd" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.640342 4925 scope.go:117] "RemoveContainer" containerID="0b9ede477d3ca1e2a2a9d0a1751087b24b6fd77c9a69304789a10f27aab387f0" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.682815 4925 scope.go:117] "RemoveContainer" containerID="1c9b9fff6d436b0ebb69e9dd1b1065cc25421d575000bb067efcbd7a135f4eaa" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.740486 4925 scope.go:117] "RemoveContainer" 
containerID="7632f6d38b496a4fd7cd16b8fe205f70aad7831f6cdcd0bc64e95e4954090d65" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.764572 4925 scope.go:117] "RemoveContainer" containerID="df7b434e83706b5ac0a5c132941d89bc3f8881c1d3b4d6b26e6f9c2d031f03ac" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.806961 4925 scope.go:117] "RemoveContainer" containerID="6a1cd23d9ec366c434b4f0b8ee68b1607b1bce8c619010bad469a4345b6bb361" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.856777 4925 scope.go:117] "RemoveContainer" containerID="7b566d6b5ae2343813c4f7835491ac1836c190bb29cae139fef0f7e3ff6ebd4b" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.886516 4925 scope.go:117] "RemoveContainer" containerID="f161dbb0caed16e21e52b7f074e0d1c9d7c19e159c456f1571757b698d1efee6" Jan 21 11:30:12 crc kubenswrapper[4925]: I0121 11:30:12.911955 4925 scope.go:117] "RemoveContainer" containerID="98bdf7507f13b94744ccd35c4ea0957f32337b9e793f58da78e857377ac49bf5" Jan 21 11:31:13 crc kubenswrapper[4925]: I0121 11:31:13.111173 4925 scope.go:117] "RemoveContainer" containerID="32a021a34444c13c9b02acf92167dc449420519727c141bd3a2c61c1b77e817f" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.278727 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher9a3f-account-delete-g97mj"] Jan 21 11:31:18 crc kubenswrapper[4925]: E0121 11:31:18.279837 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5195f4a-967f-42f5-9402-9d4473fb49c5" containerName="collect-profiles" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.279857 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5195f4a-967f-42f5-9402-9d4473fb49c5" containerName="collect-profiles" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.280096 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5195f4a-967f-42f5-9402-9d4473fb49c5" containerName="collect-profiles" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.281102 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.295670 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher9a3f-account-delete-g97mj"] Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.494069 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kr5j\" (UniqueName: \"kubernetes.io/projected/9bd2e937-9db1-4425-93b0-3cde8aae9565-kube-api-access-7kr5j\") pod \"watcher9a3f-account-delete-g97mj\" (UID: \"9bd2e937-9db1-4425-93b0-3cde8aae9565\") " pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.494711 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9bd2e937-9db1-4425-93b0-3cde8aae9565-operator-scripts\") pod \"watcher9a3f-account-delete-g97mj\" (UID: \"9bd2e937-9db1-4425-93b0-3cde8aae9565\") " pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.599823 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kr5j\" (UniqueName: \"kubernetes.io/projected/9bd2e937-9db1-4425-93b0-3cde8aae9565-kube-api-access-7kr5j\") pod \"watcher9a3f-account-delete-g97mj\" (UID: \"9bd2e937-9db1-4425-93b0-3cde8aae9565\") " pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.600353 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9bd2e937-9db1-4425-93b0-3cde8aae9565-operator-scripts\") pod \"watcher9a3f-account-delete-g97mj\" (UID: \"9bd2e937-9db1-4425-93b0-3cde8aae9565\") " pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.601761 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9bd2e937-9db1-4425-93b0-3cde8aae9565-operator-scripts\") pod \"watcher9a3f-account-delete-g97mj\" (UID: \"9bd2e937-9db1-4425-93b0-3cde8aae9565\") " pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.627247 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kr5j\" (UniqueName: \"kubernetes.io/projected/9bd2e937-9db1-4425-93b0-3cde8aae9565-kube-api-access-7kr5j\") pod \"watcher9a3f-account-delete-g97mj\" (UID: \"9bd2e937-9db1-4425-93b0-3cde8aae9565\") " pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:18 crc kubenswrapper[4925]: I0121 11:31:18.901047 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:19 crc kubenswrapper[4925]: I0121 11:31:19.459015 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher9a3f-account-delete-g97mj"] Jan 21 11:31:19 crc kubenswrapper[4925]: I0121 11:31:19.791665 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" event={"ID":"9bd2e937-9db1-4425-93b0-3cde8aae9565","Type":"ContainerStarted","Data":"b2ed6a7944a7c4a09c4e9a3747465c899d29a1f8d6828ac959d2693fb4f85bf9"} Jan 21 11:31:19 crc kubenswrapper[4925]: I0121 11:31:19.791723 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" event={"ID":"9bd2e937-9db1-4425-93b0-3cde8aae9565","Type":"ContainerStarted","Data":"bb697f6910742e4c37d764d4ac611df8703eca76e3d9c9fe5343481f70fa59c0"} Jan 21 11:31:19 crc kubenswrapper[4925]: I0121 11:31:19.817501 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" podStartSLOduration=1.817432771 podStartE2EDuration="1.817432771s" podCreationTimestamp="2026-01-21 11:31:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:31:19.814374454 +0000 UTC m=+2171.418266398" watchObservedRunningTime="2026-01-21 11:31:19.817432771 +0000 UTC m=+2171.421324705" Jan 21 11:31:20 crc kubenswrapper[4925]: I0121 11:31:20.803144 4925 generic.go:334] "Generic (PLEG): container finished" podID="9bd2e937-9db1-4425-93b0-3cde8aae9565" containerID="b2ed6a7944a7c4a09c4e9a3747465c899d29a1f8d6828ac959d2693fb4f85bf9" exitCode=0 Jan 21 11:31:20 crc kubenswrapper[4925]: I0121 11:31:20.803201 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" event={"ID":"9bd2e937-9db1-4425-93b0-3cde8aae9565","Type":"ContainerDied","Data":"b2ed6a7944a7c4a09c4e9a3747465c899d29a1f8d6828ac959d2693fb4f85bf9"} Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.464170 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.470415 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9bd2e937-9db1-4425-93b0-3cde8aae9565-operator-scripts\") pod \"9bd2e937-9db1-4425-93b0-3cde8aae9565\" (UID: \"9bd2e937-9db1-4425-93b0-3cde8aae9565\") " Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.470538 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kr5j\" (UniqueName: \"kubernetes.io/projected/9bd2e937-9db1-4425-93b0-3cde8aae9565-kube-api-access-7kr5j\") pod \"9bd2e937-9db1-4425-93b0-3cde8aae9565\" (UID: \"9bd2e937-9db1-4425-93b0-3cde8aae9565\") " Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.472482 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bd2e937-9db1-4425-93b0-3cde8aae9565-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9bd2e937-9db1-4425-93b0-3cde8aae9565" (UID: "9bd2e937-9db1-4425-93b0-3cde8aae9565"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.480887 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bd2e937-9db1-4425-93b0-3cde8aae9565-kube-api-access-7kr5j" (OuterVolumeSpecName: "kube-api-access-7kr5j") pod "9bd2e937-9db1-4425-93b0-3cde8aae9565" (UID: "9bd2e937-9db1-4425-93b0-3cde8aae9565"). InnerVolumeSpecName "kube-api-access-7kr5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.572619 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9bd2e937-9db1-4425-93b0-3cde8aae9565-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.572667 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kr5j\" (UniqueName: \"kubernetes.io/projected/9bd2e937-9db1-4425-93b0-3cde8aae9565-kube-api-access-7kr5j\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.885662 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" event={"ID":"9bd2e937-9db1-4425-93b0-3cde8aae9565","Type":"ContainerDied","Data":"bb697f6910742e4c37d764d4ac611df8703eca76e3d9c9fe5343481f70fa59c0"} Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.885736 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb697f6910742e4c37d764d4ac611df8703eca76e3d9c9fe5343481f70fa59c0" Jan 21 11:31:22 crc kubenswrapper[4925]: I0121 11:31:22.885834 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher9a3f-account-delete-g97mj" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.364557 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-8pgtn"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.380675 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-8pgtn"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.400509 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher9a3f-account-delete-g97mj"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.417262 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.426245 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher9a3f-account-delete-g97mj"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.435248 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-9a3f-account-create-update-w297x"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.570866 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-f8llq"] Jan 21 11:31:28 crc kubenswrapper[4925]: E0121 11:31:28.571654 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bd2e937-9db1-4425-93b0-3cde8aae9565" containerName="mariadb-account-delete" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.571679 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bd2e937-9db1-4425-93b0-3cde8aae9565" containerName="mariadb-account-delete" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.571857 4925 
memory_manager.go:354] "RemoveStaleState removing state" podUID="9bd2e937-9db1-4425-93b0-3cde8aae9565" containerName="mariadb-account-delete" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.572667 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.588577 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-17b2-account-create-update-f46xj"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.590041 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.592290 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.598010 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-f8llq"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.615635 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9df7m\" (UniqueName: \"kubernetes.io/projected/8d044e09-ded4-4821-8968-b1837b9fee65-kube-api-access-9df7m\") pod \"watcher-17b2-account-create-update-f46xj\" (UID: \"8d044e09-ded4-4821-8968-b1837b9fee65\") " pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.615688 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brhxp\" (UniqueName: \"kubernetes.io/projected/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-kube-api-access-brhxp\") pod \"watcher-db-create-f8llq\" (UID: \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\") " pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.615855 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d044e09-ded4-4821-8968-b1837b9fee65-operator-scripts\") pod \"watcher-17b2-account-create-update-f46xj\" (UID: \"8d044e09-ded4-4821-8968-b1837b9fee65\") " pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.615890 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-operator-scripts\") pod \"watcher-db-create-f8llq\" (UID: \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\") " pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.648593 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-17b2-account-create-update-f46xj"] Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.723279 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9df7m\" (UniqueName: \"kubernetes.io/projected/8d044e09-ded4-4821-8968-b1837b9fee65-kube-api-access-9df7m\") pod \"watcher-17b2-account-create-update-f46xj\" (UID: \"8d044e09-ded4-4821-8968-b1837b9fee65\") " pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.723350 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-brhxp\" (UniqueName: \"kubernetes.io/projected/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-kube-api-access-brhxp\") pod \"watcher-db-create-f8llq\" (UID: \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\") " pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.723484 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d044e09-ded4-4821-8968-b1837b9fee65-operator-scripts\") pod \"watcher-17b2-account-create-update-f46xj\" (UID: \"8d044e09-ded4-4821-8968-b1837b9fee65\") " pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.723529 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-operator-scripts\") pod \"watcher-db-create-f8llq\" (UID: \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\") " pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.724766 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d044e09-ded4-4821-8968-b1837b9fee65-operator-scripts\") pod \"watcher-17b2-account-create-update-f46xj\" (UID: \"8d044e09-ded4-4821-8968-b1837b9fee65\") " pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.724799 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-operator-scripts\") pod \"watcher-db-create-f8llq\" (UID: \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\") " pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.749508 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brhxp\" (UniqueName: \"kubernetes.io/projected/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-kube-api-access-brhxp\") pod \"watcher-db-create-f8llq\" (UID: \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\") " pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.749739 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9df7m\" (UniqueName: \"kubernetes.io/projected/8d044e09-ded4-4821-8968-b1837b9fee65-kube-api-access-9df7m\") pod \"watcher-17b2-account-create-update-f46xj\" (UID: \"8d044e09-ded4-4821-8968-b1837b9fee65\") " pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.907271 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:28 crc kubenswrapper[4925]: I0121 11:31:28.916960 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:29 crc kubenswrapper[4925]: I0121 11:31:29.521633 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="996cd8c6-1a8f-491b-a556-3c645f3b94d1" path="/var/lib/kubelet/pods/996cd8c6-1a8f-491b-a556-3c645f3b94d1/volumes" Jan 21 11:31:29 crc kubenswrapper[4925]: I0121 11:31:29.522500 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bd2e937-9db1-4425-93b0-3cde8aae9565" path="/var/lib/kubelet/pods/9bd2e937-9db1-4425-93b0-3cde8aae9565/volumes" Jan 21 11:31:29 crc kubenswrapper[4925]: I0121 11:31:29.523024 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c898cc46-eec6-42b0-a5a0-73ae13b0d2b1" path="/var/lib/kubelet/pods/c898cc46-eec6-42b0-a5a0-73ae13b0d2b1/volumes" Jan 21 11:31:29 crc kubenswrapper[4925]: I0121 11:31:29.524148 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-f8llq"] Jan 21 11:31:29 crc kubenswrapper[4925]: I0121 11:31:29.577566 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-17b2-account-create-update-f46xj"] Jan 21 11:31:29 crc kubenswrapper[4925]: I0121 11:31:29.974174 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" event={"ID":"8d044e09-ded4-4821-8968-b1837b9fee65","Type":"ContainerStarted","Data":"51bf79db07d4df3576d9fc87e96fcee13a9a1cbb2a6a74d513219c69d1fa772e"} Jan 21 11:31:29 crc kubenswrapper[4925]: I0121 11:31:29.976091 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-f8llq" event={"ID":"e1a21414-a4cb-4af5-8fb5-1fafdbea797e","Type":"ContainerStarted","Data":"2bea499e8aab94c1d7703e8b8d9da4f56e0dcc8476a40cfa78f046a1f896ddc8"} Jan 21 11:31:30 crc kubenswrapper[4925]: I0121 11:31:30.987210 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-f8llq" event={"ID":"e1a21414-a4cb-4af5-8fb5-1fafdbea797e","Type":"ContainerStarted","Data":"34eb2f2c6470b537c3c0d6172f9d00f1fbb89e7eb688abae8f94ccb0d84d1053"} Jan 21 11:31:30 crc kubenswrapper[4925]: I0121 11:31:30.989050 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" event={"ID":"8d044e09-ded4-4821-8968-b1837b9fee65","Type":"ContainerStarted","Data":"6b81dc42a1cb33f3f18eb36fd756bede6c02ef8b19f6dd37a43055da637c521d"} Jan 21 11:31:31 crc kubenswrapper[4925]: I0121 11:31:31.006847 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-db-create-f8llq" podStartSLOduration=3.006810824 podStartE2EDuration="3.006810824s" podCreationTimestamp="2026-01-21 11:31:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:31:31.003184329 +0000 UTC m=+2182.607076263" watchObservedRunningTime="2026-01-21 11:31:31.006810824 +0000 UTC m=+2182.610702768" Jan 21 11:31:31 crc kubenswrapper[4925]: I0121 11:31:31.029713 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" podStartSLOduration=3.029690734 podStartE2EDuration="3.029690734s" podCreationTimestamp="2026-01-21 11:31:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-21 11:31:31.024631215 +0000 UTC m=+2182.628523149" watchObservedRunningTime="2026-01-21 11:31:31.029690734 +0000 UTC m=+2182.633582658" Jan 21 11:31:32 crc kubenswrapper[4925]: I0121 11:31:32.001288 4925 generic.go:334] "Generic (PLEG): container finished" podID="e1a21414-a4cb-4af5-8fb5-1fafdbea797e" containerID="34eb2f2c6470b537c3c0d6172f9d00f1fbb89e7eb688abae8f94ccb0d84d1053" exitCode=0 Jan 21 11:31:32 crc kubenswrapper[4925]: I0121 11:31:32.001429 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-f8llq" event={"ID":"e1a21414-a4cb-4af5-8fb5-1fafdbea797e","Type":"ContainerDied","Data":"34eb2f2c6470b537c3c0d6172f9d00f1fbb89e7eb688abae8f94ccb0d84d1053"} Jan 21 11:31:32 crc kubenswrapper[4925]: I0121 11:31:32.003612 4925 generic.go:334] "Generic (PLEG): container finished" podID="8d044e09-ded4-4821-8968-b1837b9fee65" containerID="6b81dc42a1cb33f3f18eb36fd756bede6c02ef8b19f6dd37a43055da637c521d" exitCode=0 Jan 21 11:31:32 crc kubenswrapper[4925]: I0121 11:31:32.003642 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" event={"ID":"8d044e09-ded4-4821-8968-b1837b9fee65","Type":"ContainerDied","Data":"6b81dc42a1cb33f3f18eb36fd756bede6c02ef8b19f6dd37a43055da637c521d"} Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.507251 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.538101 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.694182 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9df7m\" (UniqueName: \"kubernetes.io/projected/8d044e09-ded4-4821-8968-b1837b9fee65-kube-api-access-9df7m\") pod \"8d044e09-ded4-4821-8968-b1837b9fee65\" (UID: \"8d044e09-ded4-4821-8968-b1837b9fee65\") " Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.694690 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-operator-scripts\") pod \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\" (UID: \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\") " Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.694837 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d044e09-ded4-4821-8968-b1837b9fee65-operator-scripts\") pod \"8d044e09-ded4-4821-8968-b1837b9fee65\" (UID: \"8d044e09-ded4-4821-8968-b1837b9fee65\") " Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.694946 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brhxp\" (UniqueName: \"kubernetes.io/projected/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-kube-api-access-brhxp\") pod \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\" (UID: \"e1a21414-a4cb-4af5-8fb5-1fafdbea797e\") " Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.695914 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d044e09-ded4-4821-8968-b1837b9fee65-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8d044e09-ded4-4821-8968-b1837b9fee65" (UID: "8d044e09-ded4-4821-8968-b1837b9fee65"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.695925 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e1a21414-a4cb-4af5-8fb5-1fafdbea797e" (UID: "e1a21414-a4cb-4af5-8fb5-1fafdbea797e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.696382 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.696542 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d044e09-ded4-4821-8968-b1837b9fee65-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.704236 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d044e09-ded4-4821-8968-b1837b9fee65-kube-api-access-9df7m" (OuterVolumeSpecName: "kube-api-access-9df7m") pod "8d044e09-ded4-4821-8968-b1837b9fee65" (UID: "8d044e09-ded4-4821-8968-b1837b9fee65"). InnerVolumeSpecName "kube-api-access-9df7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.710874 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-kube-api-access-brhxp" (OuterVolumeSpecName: "kube-api-access-brhxp") pod "e1a21414-a4cb-4af5-8fb5-1fafdbea797e" (UID: "e1a21414-a4cb-4af5-8fb5-1fafdbea797e"). InnerVolumeSpecName "kube-api-access-brhxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.798095 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brhxp\" (UniqueName: \"kubernetes.io/projected/e1a21414-a4cb-4af5-8fb5-1fafdbea797e-kube-api-access-brhxp\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:33 crc kubenswrapper[4925]: I0121 11:31:33.798153 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9df7m\" (UniqueName: \"kubernetes.io/projected/8d044e09-ded4-4821-8968-b1837b9fee65-kube-api-access-9df7m\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:34 crc kubenswrapper[4925]: I0121 11:31:34.021155 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" event={"ID":"8d044e09-ded4-4821-8968-b1837b9fee65","Type":"ContainerDied","Data":"51bf79db07d4df3576d9fc87e96fcee13a9a1cbb2a6a74d513219c69d1fa772e"} Jan 21 11:31:34 crc kubenswrapper[4925]: I0121 11:31:34.021218 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51bf79db07d4df3576d9fc87e96fcee13a9a1cbb2a6a74d513219c69d1fa772e" Jan 21 11:31:34 crc kubenswrapper[4925]: I0121 11:31:34.021195 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-17b2-account-create-update-f46xj" Jan 21 11:31:34 crc kubenswrapper[4925]: I0121 11:31:34.023295 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-f8llq" event={"ID":"e1a21414-a4cb-4af5-8fb5-1fafdbea797e","Type":"ContainerDied","Data":"2bea499e8aab94c1d7703e8b8d9da4f56e0dcc8476a40cfa78f046a1f896ddc8"} Jan 21 11:31:34 crc kubenswrapper[4925]: I0121 11:31:34.023342 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-f8llq" Jan 21 11:31:34 crc kubenswrapper[4925]: I0121 11:31:34.023349 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bea499e8aab94c1d7703e8b8d9da4f56e0dcc8476a40cfa78f046a1f896ddc8" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.199410 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-2p548"] Jan 21 11:31:39 crc kubenswrapper[4925]: E0121 11:31:39.200727 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d044e09-ded4-4821-8968-b1837b9fee65" containerName="mariadb-account-create-update" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.200748 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d044e09-ded4-4821-8968-b1837b9fee65" containerName="mariadb-account-create-update" Jan 21 11:31:39 crc kubenswrapper[4925]: E0121 11:31:39.200817 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1a21414-a4cb-4af5-8fb5-1fafdbea797e" containerName="mariadb-database-create" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.200830 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1a21414-a4cb-4af5-8fb5-1fafdbea797e" containerName="mariadb-database-create" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.201078 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1a21414-a4cb-4af5-8fb5-1fafdbea797e" containerName="mariadb-database-create" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.201095 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d044e09-ded4-4821-8968-b1837b9fee65" containerName="mariadb-account-create-update" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.207019 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.210678 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-mxm2j" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.210988 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.218928 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-2p548"] Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.370150 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lp77\" (UniqueName: \"kubernetes.io/projected/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-kube-api-access-6lp77\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.370318 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-db-sync-config-data\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.370430 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-config-data\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.370457 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.471514 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lp77\" (UniqueName: \"kubernetes.io/projected/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-kube-api-access-6lp77\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.471668 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-db-sync-config-data\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.471749 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-config-data\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc 
kubenswrapper[4925]: I0121 11:31:39.471776 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.483048 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-config-data\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.483521 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-db-sync-config-data\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.485962 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.499994 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lp77\" (UniqueName: \"kubernetes.io/projected/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-kube-api-access-6lp77\") pod \"watcher-kuttl-db-sync-2p548\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:39 crc kubenswrapper[4925]: I0121 11:31:39.535221 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:40 crc kubenswrapper[4925]: I0121 11:31:40.224664 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-2p548"] Jan 21 11:31:41 crc kubenswrapper[4925]: I0121 11:31:41.226967 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" event={"ID":"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c","Type":"ContainerStarted","Data":"090b14403ece320980b1a8c9c6dc2e68704b7cd51811cb33c049746c3b9e8c7a"} Jan 21 11:31:41 crc kubenswrapper[4925]: I0121 11:31:41.227438 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" event={"ID":"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c","Type":"ContainerStarted","Data":"91e9da805a41d2f5cb998cd09cb5b3010fffbc8ba3b18b6590482298befc1bef"} Jan 21 11:31:41 crc kubenswrapper[4925]: I0121 11:31:41.272462 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" podStartSLOduration=2.272427288 podStartE2EDuration="2.272427288s" podCreationTimestamp="2026-01-21 11:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:31:41.256962612 +0000 UTC m=+2192.860854556" watchObservedRunningTime="2026-01-21 11:31:41.272427288 +0000 UTC m=+2192.876319312" Jan 21 11:31:46 crc kubenswrapper[4925]: I0121 11:31:46.598570 4925 generic.go:334] "Generic (PLEG): container finished" podID="5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" containerID="090b14403ece320980b1a8c9c6dc2e68704b7cd51811cb33c049746c3b9e8c7a" exitCode=0 Jan 21 11:31:46 crc kubenswrapper[4925]: I0121 11:31:46.599283 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" event={"ID":"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c","Type":"ContainerDied","Data":"090b14403ece320980b1a8c9c6dc2e68704b7cd51811cb33c049746c3b9e8c7a"} Jan 21 11:31:47 crc kubenswrapper[4925]: I0121 11:31:47.998744 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.104806 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-combined-ca-bundle\") pod \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.105012 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-db-sync-config-data\") pod \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.105106 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lp77\" (UniqueName: \"kubernetes.io/projected/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-kube-api-access-6lp77\") pod \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.105206 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-config-data\") pod \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\" (UID: \"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c\") " Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.120787 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" (UID: "5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.121703 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-kube-api-access-6lp77" (OuterVolumeSpecName: "kube-api-access-6lp77") pod "5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" (UID: "5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c"). InnerVolumeSpecName "kube-api-access-6lp77". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.165926 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" (UID: "5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.180880 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-config-data" (OuterVolumeSpecName: "config-data") pod "5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" (UID: "5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.208580 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.208840 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lp77\" (UniqueName: \"kubernetes.io/projected/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-kube-api-access-6lp77\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.208964 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.209078 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.682195 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" event={"ID":"5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c","Type":"ContainerDied","Data":"91e9da805a41d2f5cb998cd09cb5b3010fffbc8ba3b18b6590482298befc1bef"} Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.682241 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91e9da805a41d2f5cb998cd09cb5b3010fffbc8ba3b18b6590482298befc1bef" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.682309 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-2p548" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.916444 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:31:48 crc kubenswrapper[4925]: E0121 11:31:48.917294 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" containerName="watcher-kuttl-db-sync" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.917479 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" containerName="watcher-kuttl-db-sync" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.917761 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" containerName="watcher-kuttl-db-sync" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.918757 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.923969 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.924251 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-mxm2j" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.931984 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.934524 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.941115 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.952564 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:31:48 crc kubenswrapper[4925]: I0121 11:31:48.968839 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.048570 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.048590 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.052560 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.052648 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxd7h\" (UniqueName: \"kubernetes.io/projected/efea85ac-ede7-455e-9d14-af3388f2d2a8-kube-api-access-cxd7h\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.052731 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.052794 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.052921 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.052969 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efea85ac-ede7-455e-9d14-af3388f2d2a8-logs\") pod 
\"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.053186 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.053336 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.053427 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.053472 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.053536 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tbrb\" (UniqueName: \"kubernetes.io/projected/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-kube-api-access-6tbrb\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.055004 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.061717 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.063881 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155068 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155128 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efea85ac-ede7-455e-9d14-af3388f2d2a8-logs\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155210 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155249 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155302 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155336 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155410 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155442 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " 
pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155487 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tbrb\" (UniqueName: \"kubernetes.io/projected/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-kube-api-access-6tbrb\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155539 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155603 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155649 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxd7h\" (UniqueName: \"kubernetes.io/projected/efea85ac-ede7-455e-9d14-af3388f2d2a8-kube-api-access-cxd7h\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155690 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155725 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dce28f97-8f72-40cb-909d-6beedda127d4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155757 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155785 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvjbx\" (UniqueName: \"kubernetes.io/projected/dce28f97-8f72-40cb-909d-6beedda127d4-kube-api-access-mvjbx\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.155817 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: 
\"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.157854 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efea85ac-ede7-455e-9d14-af3388f2d2a8-logs\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.159102 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.169733 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.170931 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.171586 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.171935 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.172765 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.174010 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.175269 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-combined-ca-bundle\") pod 
\"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.178875 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.182219 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxd7h\" (UniqueName: \"kubernetes.io/projected/efea85ac-ede7-455e-9d14-af3388f2d2a8-kube-api-access-cxd7h\") pod \"watcher-kuttl-api-0\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.182748 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tbrb\" (UniqueName: \"kubernetes.io/projected/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-kube-api-access-6tbrb\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.248325 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.257895 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.258012 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.258099 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dce28f97-8f72-40cb-909d-6beedda127d4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.258153 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvjbx\" (UniqueName: \"kubernetes.io/projected/dce28f97-8f72-40cb-909d-6beedda127d4-kube-api-access-mvjbx\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.258183 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.258348 4925 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.259251 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dce28f97-8f72-40cb-909d-6beedda127d4-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.263571 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.263770 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.268918 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.287631 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvjbx\" (UniqueName: \"kubernetes.io/projected/dce28f97-8f72-40cb-909d-6beedda127d4-kube-api-access-mvjbx\") pod \"watcher-kuttl-applier-0\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:49 crc kubenswrapper[4925]: I0121 11:31:49.678127 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:50 crc kubenswrapper[4925]: I0121 11:31:50.117167 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:31:50 crc kubenswrapper[4925]: I0121 11:31:50.117650 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:31:51 crc kubenswrapper[4925]: I0121 11:31:51.007887 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:31:51 crc kubenswrapper[4925]: I0121 11:31:51.068910 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:31:51 crc kubenswrapper[4925]: I0121 11:31:51.072916 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:31:52 crc kubenswrapper[4925]: I0121 11:31:52.090752 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"dce28f97-8f72-40cb-909d-6beedda127d4","Type":"ContainerStarted","Data":"76d593cdcee4c1aa4a368e37f7fadcec29cc3dcf41fd208669f9dcfd7a251742"} Jan 21 11:31:52 crc kubenswrapper[4925]: I0121 11:31:52.091152 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"dce28f97-8f72-40cb-909d-6beedda127d4","Type":"ContainerStarted","Data":"fb6d08a1efd4f69256a0ea8c58b7bda9ab086cdf552258d4d6ae30611b6a5184"} Jan 21 11:31:52 crc kubenswrapper[4925]: I0121 11:31:52.098949 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"efea85ac-ede7-455e-9d14-af3388f2d2a8","Type":"ContainerStarted","Data":"ef13f034da643fab998cdd91c4cd6a07862ae3c895b06e81333eb3c5bb8547ad"} Jan 21 11:31:52 crc kubenswrapper[4925]: I0121 11:31:52.099022 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"efea85ac-ede7-455e-9d14-af3388f2d2a8","Type":"ContainerStarted","Data":"597294891bb7d4751059f8b4485f8333b7cebf88dd45fc8e6536fc0e063a0446"} Jan 21 11:31:52 crc kubenswrapper[4925]: I0121 11:31:52.103090 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955","Type":"ContainerStarted","Data":"47649524245fb76a3e6d4079b389ee638b38aa1cc5768ac44750b71f28af6809"} Jan 21 11:31:52 crc kubenswrapper[4925]: I0121 11:31:52.103164 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955","Type":"ContainerStarted","Data":"945892f39543d38c00b10d1baefbd1734610bbe890db7ebd457ef168ba23de12"} Jan 21 11:31:52 crc kubenswrapper[4925]: I0121 11:31:52.129805 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=3.129780735 podStartE2EDuration="3.129780735s" 
podCreationTimestamp="2026-01-21 11:31:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:31:52.121177844 +0000 UTC m=+2203.725069778" watchObservedRunningTime="2026-01-21 11:31:52.129780735 +0000 UTC m=+2203.733672659" Jan 21 11:31:52 crc kubenswrapper[4925]: I0121 11:31:52.153272 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=4.153228773 podStartE2EDuration="4.153228773s" podCreationTimestamp="2026-01-21 11:31:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:31:52.148264286 +0000 UTC m=+2203.752156220" watchObservedRunningTime="2026-01-21 11:31:52.153228773 +0000 UTC m=+2203.757120707" Jan 21 11:31:53 crc kubenswrapper[4925]: I0121 11:31:53.116014 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"efea85ac-ede7-455e-9d14-af3388f2d2a8","Type":"ContainerStarted","Data":"2765da34011ea0c93705495808cc3926f804623ed99c4ff497aeebc3c99c993f"} Jan 21 11:31:53 crc kubenswrapper[4925]: I0121 11:31:53.147353 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=5.147332307 podStartE2EDuration="5.147332307s" podCreationTimestamp="2026-01-21 11:31:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:31:53.144526369 +0000 UTC m=+2204.748418323" watchObservedRunningTime="2026-01-21 11:31:53.147332307 +0000 UTC m=+2204.751224241" Jan 21 11:31:54 crc kubenswrapper[4925]: I0121 11:31:54.138804 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:54 crc kubenswrapper[4925]: I0121 11:31:54.259702 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:54 crc kubenswrapper[4925]: I0121 11:31:54.679924 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:56 crc kubenswrapper[4925]: I0121 11:31:56.162682 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:31:57 crc kubenswrapper[4925]: I0121 11:31:57.989508 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:59 crc kubenswrapper[4925]: I0121 11:31:59.327572 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:59 crc kubenswrapper[4925]: I0121 11:31:59.330767 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:59 crc kubenswrapper[4925]: I0121 11:31:59.496260 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:31:59 crc kubenswrapper[4925]: I0121 11:31:59.522243 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:31:59 crc kubenswrapper[4925]: I0121 11:31:59.679519 4925 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:31:59 crc kubenswrapper[4925]: I0121 11:31:59.711310 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.346580 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q69zm"] Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.348833 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.360814 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q69zm"] Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.459133 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-catalog-content\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.459181 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-utilities\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.459266 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zc4c\" (UniqueName: \"kubernetes.io/projected/644e5583-d8a8-4351-af74-7d72252f2e9a-kube-api-access-5zc4c\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.461562 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.470099 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.489197 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.489348 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.561518 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-catalog-content\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.562181 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-catalog-content\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " 
pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.562284 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-utilities\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.562598 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-utilities\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.562605 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zc4c\" (UniqueName: \"kubernetes.io/projected/644e5583-d8a8-4351-af74-7d72252f2e9a-kube-api-access-5zc4c\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.606644 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zc4c\" (UniqueName: \"kubernetes.io/projected/644e5583-d8a8-4351-af74-7d72252f2e9a-kube-api-access-5zc4c\") pod \"redhat-operators-q69zm\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:00 crc kubenswrapper[4925]: I0121 11:32:00.674938 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:01 crc kubenswrapper[4925]: I0121 11:32:01.322138 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q69zm"] Jan 21 11:32:01 crc kubenswrapper[4925]: I0121 11:32:01.473925 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q69zm" event={"ID":"644e5583-d8a8-4351-af74-7d72252f2e9a","Type":"ContainerStarted","Data":"7ddf5662a41e7c88fb34d98be5e80565fd7826adc5c84fac7f4a423fe71c3f89"} Jan 21 11:32:02 crc kubenswrapper[4925]: I0121 11:32:02.489601 4925 generic.go:334] "Generic (PLEG): container finished" podID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerID="fc4bf639600371f091d6ac33c9e259f8a399354a4c89180ca78de547e96f49d4" exitCode=0 Jan 21 11:32:02 crc kubenswrapper[4925]: I0121 11:32:02.491980 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q69zm" event={"ID":"644e5583-d8a8-4351-af74-7d72252f2e9a","Type":"ContainerDied","Data":"fc4bf639600371f091d6ac33c9e259f8a399354a4c89180ca78de547e96f49d4"} Jan 21 11:32:02 crc kubenswrapper[4925]: I0121 11:32:02.497111 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 11:32:04 crc kubenswrapper[4925]: I0121 11:32:04.685344 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:04 crc kubenswrapper[4925]: I0121 11:32:04.686355 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="ceilometer-central-agent" containerID="cri-o://5ed03fbb64eb1b3089e897600e49432c63a04d574844ba1fe896bffd0dbfa385" gracePeriod=30 Jan 21 
11:32:04 crc kubenswrapper[4925]: I0121 11:32:04.687088 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="proxy-httpd" containerID="cri-o://32d76203f4ad83584d070c8654ebdd08cce9cb4596bdf4833a018e3ccbcf3ca9" gracePeriod=30 Jan 21 11:32:04 crc kubenswrapper[4925]: I0121 11:32:04.687142 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="sg-core" containerID="cri-o://108d090570a3f84e078afe94a503335e8d287d0cef29aefefb0ebf280802d9fd" gracePeriod=30 Jan 21 11:32:04 crc kubenswrapper[4925]: I0121 11:32:04.687177 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="ceilometer-notification-agent" containerID="cri-o://5a931a602f84dec188a493ab23928a2e795a0293644f8ac20e3a9b9a0457dfbe" gracePeriod=30 Jan 21 11:32:05 crc kubenswrapper[4925]: I0121 11:32:05.610663 4925 generic.go:334] "Generic (PLEG): container finished" podID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerID="32d76203f4ad83584d070c8654ebdd08cce9cb4596bdf4833a018e3ccbcf3ca9" exitCode=0 Jan 21 11:32:05 crc kubenswrapper[4925]: I0121 11:32:05.611034 4925 generic.go:334] "Generic (PLEG): container finished" podID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerID="108d090570a3f84e078afe94a503335e8d287d0cef29aefefb0ebf280802d9fd" exitCode=2 Jan 21 11:32:05 crc kubenswrapper[4925]: I0121 11:32:05.614376 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerDied","Data":"32d76203f4ad83584d070c8654ebdd08cce9cb4596bdf4833a018e3ccbcf3ca9"} Jan 21 11:32:05 crc kubenswrapper[4925]: I0121 11:32:05.614459 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerDied","Data":"108d090570a3f84e078afe94a503335e8d287d0cef29aefefb0ebf280802d9fd"} Jan 21 11:32:05 crc kubenswrapper[4925]: I0121 11:32:05.615351 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q69zm" event={"ID":"644e5583-d8a8-4351-af74-7d72252f2e9a","Type":"ContainerStarted","Data":"453fd643287e1d40e75803b12cb52f373a0fbdb54e4ddf5bab744f3a83186ee7"} Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.069825 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-2p548"] Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.077920 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-2p548"] Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.300480 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.300844 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="dce28f97-8f72-40cb-909d-6beedda127d4" containerName="watcher-applier" containerID="cri-o://76d593cdcee4c1aa4a368e37f7fadcec29cc3dcf41fd208669f9dcfd7a251742" gracePeriod=30 Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.325244 4925 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["watcher-kuttl-default/watcher17b2-account-delete-s4vdj"] Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.392568 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher17b2-account-delete-s4vdj"] Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.392776 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.407671 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.407946 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-kuttl-api-log" containerID="cri-o://ef13f034da643fab998cdd91c4cd6a07862ae3c895b06e81333eb3c5bb8547ad" gracePeriod=30 Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.408142 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-api" containerID="cri-o://2765da34011ea0c93705495808cc3926f804623ed99c4ff497aeebc3c99c993f" gracePeriod=30 Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.430543 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.430814 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" containerName="watcher-decision-engine" containerID="cri-o://47649524245fb76a3e6d4079b389ee638b38aa1cc5768ac44750b71f28af6809" gracePeriod=30 Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.497521 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29cd12b5-4aef-4b75-871b-82a3152f84b6-operator-scripts\") pod \"watcher17b2-account-delete-s4vdj\" (UID: \"29cd12b5-4aef-4b75-871b-82a3152f84b6\") " pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.497621 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbh2p\" (UniqueName: \"kubernetes.io/projected/29cd12b5-4aef-4b75-871b-82a3152f84b6-kube-api-access-nbh2p\") pod \"watcher17b2-account-delete-s4vdj\" (UID: \"29cd12b5-4aef-4b75-871b-82a3152f84b6\") " pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.600269 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29cd12b5-4aef-4b75-871b-82a3152f84b6-operator-scripts\") pod \"watcher17b2-account-delete-s4vdj\" (UID: \"29cd12b5-4aef-4b75-871b-82a3152f84b6\") " pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.600355 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbh2p\" (UniqueName: \"kubernetes.io/projected/29cd12b5-4aef-4b75-871b-82a3152f84b6-kube-api-access-nbh2p\") pod \"watcher17b2-account-delete-s4vdj\" (UID: 
\"29cd12b5-4aef-4b75-871b-82a3152f84b6\") " pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.601993 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29cd12b5-4aef-4b75-871b-82a3152f84b6-operator-scripts\") pod \"watcher17b2-account-delete-s4vdj\" (UID: \"29cd12b5-4aef-4b75-871b-82a3152f84b6\") " pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.649177 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbh2p\" (UniqueName: \"kubernetes.io/projected/29cd12b5-4aef-4b75-871b-82a3152f84b6-kube-api-access-nbh2p\") pod \"watcher17b2-account-delete-s4vdj\" (UID: \"29cd12b5-4aef-4b75-871b-82a3152f84b6\") " pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.682662 4925 generic.go:334] "Generic (PLEG): container finished" podID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerID="5ed03fbb64eb1b3089e897600e49432c63a04d574844ba1fe896bffd0dbfa385" exitCode=0 Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.682793 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerDied","Data":"5ed03fbb64eb1b3089e897600e49432c63a04d574844ba1fe896bffd0dbfa385"} Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.709008 4925 generic.go:334] "Generic (PLEG): container finished" podID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerID="ef13f034da643fab998cdd91c4cd6a07862ae3c895b06e81333eb3c5bb8547ad" exitCode=143 Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.709479 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"efea85ac-ede7-455e-9d14-af3388f2d2a8","Type":"ContainerDied","Data":"ef13f034da643fab998cdd91c4cd6a07862ae3c895b06e81333eb3c5bb8547ad"} Jan 21 11:32:06 crc kubenswrapper[4925]: I0121 11:32:06.725926 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:07 crc kubenswrapper[4925]: I0121 11:32:07.805421 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c" path="/var/lib/kubelet/pods/5627ee3d-fbc3-4ee6-8f33-7b0eeb0ff92c/volumes" Jan 21 11:32:07 crc kubenswrapper[4925]: I0121 11:32:07.903654 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher17b2-account-delete-s4vdj"] Jan 21 11:32:08 crc kubenswrapper[4925]: I0121 11:32:08.824238 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" event={"ID":"29cd12b5-4aef-4b75-871b-82a3152f84b6","Type":"ContainerStarted","Data":"511114bef797557267e84ba194482469adfa7be9eeb31e47c680cb3a836b3c63"} Jan 21 11:32:09 crc kubenswrapper[4925]: I0121 11:32:09.340157 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9322/\": read tcp 10.217.0.2:47518->10.217.0.173:9322: read: connection reset by peer" Jan 21 11:32:09 crc kubenswrapper[4925]: I0121 11:32:09.340157 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.173:9322/\": read tcp 10.217.0.2:47502->10.217.0.173:9322: read: connection reset by peer" Jan 21 11:32:09 crc kubenswrapper[4925]: E0121 11:32:09.682668 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76d593cdcee4c1aa4a368e37f7fadcec29cc3dcf41fd208669f9dcfd7a251742" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:32:09 crc kubenswrapper[4925]: E0121 11:32:09.684800 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76d593cdcee4c1aa4a368e37f7fadcec29cc3dcf41fd208669f9dcfd7a251742" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:32:09 crc kubenswrapper[4925]: E0121 11:32:09.687076 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76d593cdcee4c1aa4a368e37f7fadcec29cc3dcf41fd208669f9dcfd7a251742" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:32:09 crc kubenswrapper[4925]: E0121 11:32:09.687150 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="dce28f97-8f72-40cb-909d-6beedda127d4" containerName="watcher-applier" Jan 21 11:32:10 crc kubenswrapper[4925]: I0121 11:32:10.854949 4925 generic.go:334] "Generic (PLEG): container finished" podID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerID="453fd643287e1d40e75803b12cb52f373a0fbdb54e4ddf5bab744f3a83186ee7" exitCode=0 Jan 21 11:32:10 crc kubenswrapper[4925]: I0121 11:32:10.855067 4925 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-operators-q69zm" event={"ID":"644e5583-d8a8-4351-af74-7d72252f2e9a","Type":"ContainerDied","Data":"453fd643287e1d40e75803b12cb52f373a0fbdb54e4ddf5bab744f3a83186ee7"} Jan 21 11:32:10 crc kubenswrapper[4925]: I0121 11:32:10.862778 4925 generic.go:334] "Generic (PLEG): container finished" podID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerID="5a931a602f84dec188a493ab23928a2e795a0293644f8ac20e3a9b9a0457dfbe" exitCode=0 Jan 21 11:32:10 crc kubenswrapper[4925]: I0121 11:32:10.862837 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerDied","Data":"5a931a602f84dec188a493ab23928a2e795a0293644f8ac20e3a9b9a0457dfbe"} Jan 21 11:32:10 crc kubenswrapper[4925]: I0121 11:32:10.867587 4925 generic.go:334] "Generic (PLEG): container finished" podID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerID="2765da34011ea0c93705495808cc3926f804623ed99c4ff497aeebc3c99c993f" exitCode=0 Jan 21 11:32:10 crc kubenswrapper[4925]: I0121 11:32:10.867703 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"efea85ac-ede7-455e-9d14-af3388f2d2a8","Type":"ContainerDied","Data":"2765da34011ea0c93705495808cc3926f804623ed99c4ff497aeebc3c99c993f"} Jan 21 11:32:10 crc kubenswrapper[4925]: I0121 11:32:10.883419 4925 generic.go:334] "Generic (PLEG): container finished" podID="29cd12b5-4aef-4b75-871b-82a3152f84b6" containerID="0e6ce034b453fc8c600dbcedeb85d10818cb413a9c7d1e6c8b02f2d84095e404" exitCode=0 Jan 21 11:32:10 crc kubenswrapper[4925]: I0121 11:32:10.883481 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" event={"ID":"29cd12b5-4aef-4b75-871b-82a3152f84b6","Type":"ContainerDied","Data":"0e6ce034b453fc8c600dbcedeb85d10818cb413a9c7d1e6c8b02f2d84095e404"} Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.106537 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.357583 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqqxn\" (UniqueName: \"kubernetes.io/projected/1019a31b-2bb6-4e0b-bf17-950052095e18-kube-api-access-cqqxn\") pod \"1019a31b-2bb6-4e0b-bf17-950052095e18\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.363078 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-combined-ca-bundle\") pod \"1019a31b-2bb6-4e0b-bf17-950052095e18\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.363273 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-config-data\") pod \"1019a31b-2bb6-4e0b-bf17-950052095e18\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.363343 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-run-httpd\") pod \"1019a31b-2bb6-4e0b-bf17-950052095e18\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.363373 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-ceilometer-tls-certs\") pod \"1019a31b-2bb6-4e0b-bf17-950052095e18\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.363456 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-scripts\") pod \"1019a31b-2bb6-4e0b-bf17-950052095e18\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.363489 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-log-httpd\") pod \"1019a31b-2bb6-4e0b-bf17-950052095e18\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.363545 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-sg-core-conf-yaml\") pod \"1019a31b-2bb6-4e0b-bf17-950052095e18\" (UID: \"1019a31b-2bb6-4e0b-bf17-950052095e18\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.366797 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1019a31b-2bb6-4e0b-bf17-950052095e18" (UID: "1019a31b-2bb6-4e0b-bf17-950052095e18"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.376711 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1019a31b-2bb6-4e0b-bf17-950052095e18-kube-api-access-cqqxn" (OuterVolumeSpecName: "kube-api-access-cqqxn") pod "1019a31b-2bb6-4e0b-bf17-950052095e18" (UID: "1019a31b-2bb6-4e0b-bf17-950052095e18"). InnerVolumeSpecName "kube-api-access-cqqxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.378594 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1019a31b-2bb6-4e0b-bf17-950052095e18" (UID: "1019a31b-2bb6-4e0b-bf17-950052095e18"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.391567 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-scripts" (OuterVolumeSpecName: "scripts") pod "1019a31b-2bb6-4e0b-bf17-950052095e18" (UID: "1019a31b-2bb6-4e0b-bf17-950052095e18"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.466287 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqqxn\" (UniqueName: \"kubernetes.io/projected/1019a31b-2bb6-4e0b-bf17-950052095e18-kube-api-access-cqqxn\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.476459 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.476727 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.476855 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1019a31b-2bb6-4e0b-bf17-950052095e18-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.501691 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1019a31b-2bb6-4e0b-bf17-950052095e18" (UID: "1019a31b-2bb6-4e0b-bf17-950052095e18"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.583310 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.616625 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1019a31b-2bb6-4e0b-bf17-950052095e18" (UID: "1019a31b-2bb6-4e0b-bf17-950052095e18"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.657654 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-config-data" (OuterVolumeSpecName: "config-data") pod "1019a31b-2bb6-4e0b-bf17-950052095e18" (UID: "1019a31b-2bb6-4e0b-bf17-950052095e18"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.692652 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1019a31b-2bb6-4e0b-bf17-950052095e18" (UID: "1019a31b-2bb6-4e0b-bf17-950052095e18"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.693825 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.693852 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.693861 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1019a31b-2bb6-4e0b-bf17-950052095e18-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.707177 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.795520 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxd7h\" (UniqueName: \"kubernetes.io/projected/efea85ac-ede7-455e-9d14-af3388f2d2a8-kube-api-access-cxd7h\") pod \"efea85ac-ede7-455e-9d14-af3388f2d2a8\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.795828 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-combined-ca-bundle\") pod \"efea85ac-ede7-455e-9d14-af3388f2d2a8\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.795861 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-custom-prometheus-ca\") pod \"efea85ac-ede7-455e-9d14-af3388f2d2a8\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.795890 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-cert-memcached-mtls\") pod \"efea85ac-ede7-455e-9d14-af3388f2d2a8\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.795959 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-config-data\") pod \"efea85ac-ede7-455e-9d14-af3388f2d2a8\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.796040 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efea85ac-ede7-455e-9d14-af3388f2d2a8-logs\") pod \"efea85ac-ede7-455e-9d14-af3388f2d2a8\" (UID: \"efea85ac-ede7-455e-9d14-af3388f2d2a8\") " Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.797012 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efea85ac-ede7-455e-9d14-af3388f2d2a8-logs" (OuterVolumeSpecName: "logs") pod "efea85ac-ede7-455e-9d14-af3388f2d2a8" (UID: "efea85ac-ede7-455e-9d14-af3388f2d2a8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.802251 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efea85ac-ede7-455e-9d14-af3388f2d2a8-kube-api-access-cxd7h" (OuterVolumeSpecName: "kube-api-access-cxd7h") pod "efea85ac-ede7-455e-9d14-af3388f2d2a8" (UID: "efea85ac-ede7-455e-9d14-af3388f2d2a8"). InnerVolumeSpecName "kube-api-access-cxd7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:11 crc kubenswrapper[4925]: I0121 11:32:11.835562 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "efea85ac-ede7-455e-9d14-af3388f2d2a8" (UID: "efea85ac-ede7-455e-9d14-af3388f2d2a8"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.009245 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efea85ac-ede7-455e-9d14-af3388f2d2a8-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.009283 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxd7h\" (UniqueName: \"kubernetes.io/projected/efea85ac-ede7-455e-9d14-af3388f2d2a8-kube-api-access-cxd7h\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.009299 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.079635 4925 generic.go:334] "Generic (PLEG): container finished" podID="dce28f97-8f72-40cb-909d-6beedda127d4" containerID="76d593cdcee4c1aa4a368e37f7fadcec29cc3dcf41fd208669f9dcfd7a251742" exitCode=0 Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.079732 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"dce28f97-8f72-40cb-909d-6beedda127d4","Type":"ContainerDied","Data":"76d593cdcee4c1aa4a368e37f7fadcec29cc3dcf41fd208669f9dcfd7a251742"} Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.079747 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "efea85ac-ede7-455e-9d14-af3388f2d2a8" (UID: "efea85ac-ede7-455e-9d14-af3388f2d2a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.105559 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"1019a31b-2bb6-4e0b-bf17-950052095e18","Type":"ContainerDied","Data":"f14c740cfb34a91f57ab94dfd22ddd7d89b74a3a412dcafbb09291b36caf3402"} Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.105683 4925 scope.go:117] "RemoveContainer" containerID="32d76203f4ad83584d070c8654ebdd08cce9cb4596bdf4833a018e3ccbcf3ca9" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.105706 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.111028 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.112309 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.112990 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"efea85ac-ede7-455e-9d14-af3388f2d2a8","Type":"ContainerDied","Data":"597294891bb7d4751059f8b4485f8333b7cebf88dd45fc8e6536fc0e063a0446"} Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.117223 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "efea85ac-ede7-455e-9d14-af3388f2d2a8" (UID: "efea85ac-ede7-455e-9d14-af3388f2d2a8"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.143352 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-config-data" (OuterVolumeSpecName: "config-data") pod "efea85ac-ede7-455e-9d14-af3388f2d2a8" (UID: "efea85ac-ede7-455e-9d14-af3388f2d2a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.212543 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.220618 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efea85ac-ede7-455e-9d14-af3388f2d2a8-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.271383 4925 scope.go:117] "RemoveContainer" containerID="108d090570a3f84e078afe94a503335e8d287d0cef29aefefb0ebf280802d9fd" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.271794 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.309570 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.321843 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:12 crc kubenswrapper[4925]: E0121 11:32:12.322347 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-kuttl-api-log" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322366 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-kuttl-api-log" Jan 21 11:32:12 crc kubenswrapper[4925]: E0121 11:32:12.322379 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="proxy-httpd" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322386 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="proxy-httpd" Jan 21 11:32:12 crc kubenswrapper[4925]: E0121 11:32:12.322421 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="sg-core" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322429 4925 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="sg-core" Jan 21 11:32:12 crc kubenswrapper[4925]: E0121 11:32:12.322450 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="ceilometer-central-agent" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322456 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="ceilometer-central-agent" Jan 21 11:32:12 crc kubenswrapper[4925]: E0121 11:32:12.322470 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-api" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322486 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-api" Jan 21 11:32:12 crc kubenswrapper[4925]: E0121 11:32:12.322500 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="ceilometer-notification-agent" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322506 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="ceilometer-notification-agent" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322700 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-api" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322717 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="proxy-httpd" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322728 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="ceilometer-notification-agent" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322740 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="ceilometer-central-agent" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322748 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" containerName="sg-core" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.322758 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" containerName="watcher-kuttl-api-log" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.324715 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.331119 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.331355 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.331502 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.333611 4925 scope.go:117] "RemoveContainer" containerID="5a931a602f84dec188a493ab23928a2e795a0293644f8ac20e3a9b9a0457dfbe" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.336253 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.370946 4925 scope.go:117] "RemoveContainer" containerID="5ed03fbb64eb1b3089e897600e49432c63a04d574844ba1fe896bffd0dbfa385" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.424564 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-log-httpd\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.424638 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-run-httpd\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.424680 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-scripts\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.424716 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.424818 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-config-data\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.424848 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.424883 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw7lq\" (UniqueName: \"kubernetes.io/projected/9208753c-4e0c-4c99-abf2-b7005c2c81d4-kube-api-access-zw7lq\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.424923 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.455074 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.481594 4925 scope.go:117] "RemoveContainer" containerID="2765da34011ea0c93705495808cc3926f804623ed99c4ff497aeebc3c99c993f" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.734960 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-config-data\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.735006 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.735037 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw7lq\" (UniqueName: \"kubernetes.io/projected/9208753c-4e0c-4c99-abf2-b7005c2c81d4-kube-api-access-zw7lq\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.735058 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.735118 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-log-httpd\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.735144 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-run-httpd\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.735177 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-scripts\") pod \"ceilometer-0\" (UID: 
\"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.735208 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.739741 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-run-httpd\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.740010 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-log-httpd\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.741245 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.745676 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.748999 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-config-data\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.749209 4925 scope.go:117] "RemoveContainer" containerID="ef13f034da643fab998cdd91c4cd6a07862ae3c895b06e81333eb3c5bb8547ad" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.751646 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.754909 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-scripts\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.757210 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.763630 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.766595 4925 
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.766595 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw7lq\" (UniqueName: \"kubernetes.io/projected/9208753c-4e0c-4c99-abf2-b7005c2c81d4-kube-api-access-zw7lq\") pod \"ceilometer-0\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " pod="watcher-kuttl-default/ceilometer-0"
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.836034 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvjbx\" (UniqueName: \"kubernetes.io/projected/dce28f97-8f72-40cb-909d-6beedda127d4-kube-api-access-mvjbx\") pod \"dce28f97-8f72-40cb-909d-6beedda127d4\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") "
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.836112 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-combined-ca-bundle\") pod \"dce28f97-8f72-40cb-909d-6beedda127d4\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") "
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.836194 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dce28f97-8f72-40cb-909d-6beedda127d4-logs\") pod \"dce28f97-8f72-40cb-909d-6beedda127d4\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") "
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.836360 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-cert-memcached-mtls\") pod \"dce28f97-8f72-40cb-909d-6beedda127d4\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") "
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.836463 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-config-data\") pod \"dce28f97-8f72-40cb-909d-6beedda127d4\" (UID: \"dce28f97-8f72-40cb-909d-6beedda127d4\") "
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.838388 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dce28f97-8f72-40cb-909d-6beedda127d4-logs" (OuterVolumeSpecName: "logs") pod "dce28f97-8f72-40cb-909d-6beedda127d4" (UID: "dce28f97-8f72-40cb-909d-6beedda127d4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.847810 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dce28f97-8f72-40cb-909d-6beedda127d4-kube-api-access-mvjbx" (OuterVolumeSpecName: "kube-api-access-mvjbx") pod "dce28f97-8f72-40cb-909d-6beedda127d4" (UID: "dce28f97-8f72-40cb-909d-6beedda127d4"). InnerVolumeSpecName "kube-api-access-mvjbx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.910287 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dce28f97-8f72-40cb-909d-6beedda127d4" (UID: "dce28f97-8f72-40cb-909d-6beedda127d4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.916609 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-config-data" (OuterVolumeSpecName: "config-data") pod "dce28f97-8f72-40cb-909d-6beedda127d4" (UID: "dce28f97-8f72-40cb-909d-6beedda127d4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.938936 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.938996 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvjbx\" (UniqueName: \"kubernetes.io/projected/dce28f97-8f72-40cb-909d-6beedda127d4-kube-api-access-mvjbx\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.939012 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.939024 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dce28f97-8f72-40cb-909d-6beedda127d4-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.948952 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.981833 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:12 crc kubenswrapper[4925]: I0121 11:32:12.994562 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "dce28f97-8f72-40cb-909d-6beedda127d4" (UID: "dce28f97-8f72-40cb-909d-6beedda127d4"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.040632 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/dce28f97-8f72-40cb-909d-6beedda127d4-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.249335 4925 scope.go:117] "RemoveContainer" containerID="d2b72003c42b57064ec2add004d3461e5f0168dc7808e3ae51c083168392d1dd" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.287899 4925 generic.go:334] "Generic (PLEG): container finished" podID="928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" containerID="47649524245fb76a3e6d4079b389ee638b38aa1cc5768ac44750b71f28af6809" exitCode=0 Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.288225 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955","Type":"ContainerDied","Data":"47649524245fb76a3e6d4079b389ee638b38aa1cc5768ac44750b71f28af6809"} Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.292034 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" event={"ID":"29cd12b5-4aef-4b75-871b-82a3152f84b6","Type":"ContainerDied","Data":"511114bef797557267e84ba194482469adfa7be9eeb31e47c680cb3a836b3c63"} Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.292079 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="511114bef797557267e84ba194482469adfa7be9eeb31e47c680cb3a836b3c63" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.293983 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q69zm" event={"ID":"644e5583-d8a8-4351-af74-7d72252f2e9a","Type":"ContainerStarted","Data":"d900d011de833f5bd19fa9a34cdd7a53efcd435de1eee98f99f5bb72b1b7033e"} Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.302449 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"dce28f97-8f72-40cb-909d-6beedda127d4","Type":"ContainerDied","Data":"fb6d08a1efd4f69256a0ea8c58b7bda9ab086cdf552258d4d6ae30611b6a5184"} Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.302508 4925 scope.go:117] "RemoveContainer" containerID="76d593cdcee4c1aa4a368e37f7fadcec29cc3dcf41fd208669f9dcfd7a251742" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.302767 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.331587 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q69zm" podStartSLOduration=4.099547006 podStartE2EDuration="13.33156055s" podCreationTimestamp="2026-01-21 11:32:00 +0000 UTC" firstStartedPulling="2026-01-21 11:32:02.496732206 +0000 UTC m=+2214.100624160" lastFinishedPulling="2026-01-21 11:32:11.72874578 +0000 UTC m=+2223.332637704" observedRunningTime="2026-01-21 11:32:13.322715772 +0000 UTC m=+2224.926607706" watchObservedRunningTime="2026-01-21 11:32:13.33156055 +0000 UTC m=+2224.935452484" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.369287 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.399953 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.420921 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.519985 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1019a31b-2bb6-4e0b-bf17-950052095e18" path="/var/lib/kubelet/pods/1019a31b-2bb6-4e0b-bf17-950052095e18/volumes" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.521627 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbh2p\" (UniqueName: \"kubernetes.io/projected/29cd12b5-4aef-4b75-871b-82a3152f84b6-kube-api-access-nbh2p\") pod \"29cd12b5-4aef-4b75-871b-82a3152f84b6\" (UID: \"29cd12b5-4aef-4b75-871b-82a3152f84b6\") " Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.521665 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dce28f97-8f72-40cb-909d-6beedda127d4" path="/var/lib/kubelet/pods/dce28f97-8f72-40cb-909d-6beedda127d4/volumes" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.521765 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29cd12b5-4aef-4b75-871b-82a3152f84b6-operator-scripts\") pod \"29cd12b5-4aef-4b75-871b-82a3152f84b6\" (UID: \"29cd12b5-4aef-4b75-871b-82a3152f84b6\") " Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.522913 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29cd12b5-4aef-4b75-871b-82a3152f84b6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "29cd12b5-4aef-4b75-871b-82a3152f84b6" (UID: "29cd12b5-4aef-4b75-871b-82a3152f84b6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.522470 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efea85ac-ede7-455e-9d14-af3388f2d2a8" path="/var/lib/kubelet/pods/efea85ac-ede7-455e-9d14-af3388f2d2a8/volumes" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.528911 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29cd12b5-4aef-4b75-871b-82a3152f84b6-kube-api-access-nbh2p" (OuterVolumeSpecName: "kube-api-access-nbh2p") pod "29cd12b5-4aef-4b75-871b-82a3152f84b6" (UID: "29cd12b5-4aef-4b75-871b-82a3152f84b6"). InnerVolumeSpecName "kube-api-access-nbh2p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.624556 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbh2p\" (UniqueName: \"kubernetes.io/projected/29cd12b5-4aef-4b75-871b-82a3152f84b6-kube-api-access-nbh2p\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.624607 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29cd12b5-4aef-4b75-871b-82a3152f84b6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:13 crc kubenswrapper[4925]: I0121 11:32:13.664630 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.158951 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.312994 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955","Type":"ContainerDied","Data":"945892f39543d38c00b10d1baefbd1734610bbe890db7ebd457ef168ba23de12"} Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.313029 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.313061 4925 scope.go:117] "RemoveContainer" containerID="47649524245fb76a3e6d4079b389ee638b38aa1cc5768ac44750b71f28af6809" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.315505 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher17b2-account-delete-s4vdj" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.315793 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerStarted","Data":"a5deb341a8850a95e358ae06e19408896850ecad0a9a43a234dc380c6b84d216"} Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.345367 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-logs\") pod \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.345535 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tbrb\" (UniqueName: \"kubernetes.io/projected/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-kube-api-access-6tbrb\") pod \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.345587 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-cert-memcached-mtls\") pod \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.345639 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-combined-ca-bundle\") pod \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.345666 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-config-data\") pod \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.345710 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-custom-prometheus-ca\") pod \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\" (UID: \"928d7ddd-877b-4d3f-aa6b-e04e0fcd8955\") " Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.345992 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-logs" (OuterVolumeSpecName: "logs") pod "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" (UID: "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.346435 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.354508 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-kube-api-access-6tbrb" (OuterVolumeSpecName: "kube-api-access-6tbrb") pod "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" (UID: "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955"). 
InnerVolumeSpecName "kube-api-access-6tbrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.405072 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" (UID: "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.448362 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tbrb\" (UniqueName: \"kubernetes.io/projected/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-kube-api-access-6tbrb\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.448417 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.468993 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" (UID: "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.477165 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" (UID: "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.483250 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-config-data" (OuterVolumeSpecName: "config-data") pod "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" (UID: "928d7ddd-877b-4d3f-aa6b-e04e0fcd8955"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.551627 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.551710 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.551721 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.656120 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:32:14 crc kubenswrapper[4925]: I0121 11:32:14.663048 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:32:15 crc kubenswrapper[4925]: I0121 11:32:15.326073 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerStarted","Data":"79cd498396ebdb8b92bf22e5c3ae82c82e37c89718e27758b9374f6d7fc20e41"} Jan 21 11:32:15 crc kubenswrapper[4925]: I0121 11:32:15.520110 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" path="/var/lib/kubelet/pods/928d7ddd-877b-4d3f-aa6b-e04e0fcd8955/volumes" Jan 21 11:32:16 crc kubenswrapper[4925]: I0121 11:32:16.258216 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-f8llq"] Jan 21 11:32:16 crc kubenswrapper[4925]: I0121 11:32:16.268614 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-f8llq"] Jan 21 11:32:16 crc kubenswrapper[4925]: I0121 11:32:16.276193 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher17b2-account-delete-s4vdj"] Jan 21 11:32:16 crc kubenswrapper[4925]: I0121 11:32:16.311744 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-17b2-account-create-update-f46xj"] Jan 21 11:32:16 crc kubenswrapper[4925]: I0121 11:32:16.335248 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher17b2-account-delete-s4vdj"] Jan 21 11:32:16 crc kubenswrapper[4925]: I0121 11:32:16.341576 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-17b2-account-create-update-f46xj"] Jan 21 11:32:16 crc kubenswrapper[4925]: I0121 11:32:16.419612 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerStarted","Data":"4207aaf13a3f4d4a7c6f6222b650f0496cb8dacdcc9d6b3541b4c40edba3d15f"} Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.052776 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-jzpjj"] Jan 21 11:32:17 crc kubenswrapper[4925]: E0121 11:32:17.053387 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dce28f97-8f72-40cb-909d-6beedda127d4" containerName="watcher-applier" Jan 21 
11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.053514 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="dce28f97-8f72-40cb-909d-6beedda127d4" containerName="watcher-applier" Jan 21 11:32:17 crc kubenswrapper[4925]: E0121 11:32:17.053532 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" containerName="watcher-decision-engine" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.053541 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" containerName="watcher-decision-engine" Jan 21 11:32:17 crc kubenswrapper[4925]: E0121 11:32:17.053599 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29cd12b5-4aef-4b75-871b-82a3152f84b6" containerName="mariadb-account-delete" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.053615 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="29cd12b5-4aef-4b75-871b-82a3152f84b6" containerName="mariadb-account-delete" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.053892 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="dce28f97-8f72-40cb-909d-6beedda127d4" containerName="watcher-applier" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.053934 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="29cd12b5-4aef-4b75-871b-82a3152f84b6" containerName="mariadb-account-delete" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.053976 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="928d7ddd-877b-4d3f-aa6b-e04e0fcd8955" containerName="watcher-decision-engine" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.054995 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.071303 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-jzpjj"] Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.093825 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-50a1-account-create-update-h44lh"] Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.097610 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.210577 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7xjm\" (UniqueName: \"kubernetes.io/projected/99daa308-0637-441e-86ae-8692e2155898-kube-api-access-g7xjm\") pod \"watcher-50a1-account-create-update-h44lh\" (UID: \"99daa308-0637-441e-86ae-8692e2155898\") " pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.210969 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-operator-scripts\") pod \"watcher-db-create-jzpjj\" (UID: \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\") " pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.211221 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-999gx\" (UniqueName: \"kubernetes.io/projected/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-kube-api-access-999gx\") pod \"watcher-db-create-jzpjj\" (UID: \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\") " pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.211374 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99daa308-0637-441e-86ae-8692e2155898-operator-scripts\") pod \"watcher-50a1-account-create-update-h44lh\" (UID: \"99daa308-0637-441e-86ae-8692e2155898\") " pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.213602 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-50a1-account-create-update-h44lh"] Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.215447 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.312255 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99daa308-0637-441e-86ae-8692e2155898-operator-scripts\") pod \"watcher-50a1-account-create-update-h44lh\" (UID: \"99daa308-0637-441e-86ae-8692e2155898\") " pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.312363 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7xjm\" (UniqueName: \"kubernetes.io/projected/99daa308-0637-441e-86ae-8692e2155898-kube-api-access-g7xjm\") pod \"watcher-50a1-account-create-update-h44lh\" (UID: \"99daa308-0637-441e-86ae-8692e2155898\") " pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.312478 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-operator-scripts\") pod \"watcher-db-create-jzpjj\" (UID: \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\") " pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.312593 4925 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-999gx\" (UniqueName: \"kubernetes.io/projected/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-kube-api-access-999gx\") pod \"watcher-db-create-jzpjj\" (UID: \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\") " pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.313288 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99daa308-0637-441e-86ae-8692e2155898-operator-scripts\") pod \"watcher-50a1-account-create-update-h44lh\" (UID: \"99daa308-0637-441e-86ae-8692e2155898\") " pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.314183 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-operator-scripts\") pod \"watcher-db-create-jzpjj\" (UID: \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\") " pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.348581 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7xjm\" (UniqueName: \"kubernetes.io/projected/99daa308-0637-441e-86ae-8692e2155898-kube-api-access-g7xjm\") pod \"watcher-50a1-account-create-update-h44lh\" (UID: \"99daa308-0637-441e-86ae-8692e2155898\") " pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.349098 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-999gx\" (UniqueName: \"kubernetes.io/projected/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-kube-api-access-999gx\") pod \"watcher-db-create-jzpjj\" (UID: \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\") " pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.384011 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.465422 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerStarted","Data":"ed433d149b82ada44852effca30d77ebf6c86d7c2c0c4bf3200a46dd26ac5181"} Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.541341 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.554955 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29cd12b5-4aef-4b75-871b-82a3152f84b6" path="/var/lib/kubelet/pods/29cd12b5-4aef-4b75-871b-82a3152f84b6/volumes" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.556874 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d044e09-ded4-4821-8968-b1837b9fee65" path="/var/lib/kubelet/pods/8d044e09-ded4-4821-8968-b1837b9fee65/volumes" Jan 21 11:32:17 crc kubenswrapper[4925]: I0121 11:32:17.562204 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1a21414-a4cb-4af5-8fb5-1fafdbea797e" path="/var/lib/kubelet/pods/e1a21414-a4cb-4af5-8fb5-1fafdbea797e/volumes" Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.071444 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-jzpjj"] Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.418769 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-50a1-account-create-update-h44lh"] Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.475912 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-jzpjj" event={"ID":"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc","Type":"ContainerStarted","Data":"2da2ec890e77a105493569b7fff28aeafd352c3a564c79f216cb941b8a7d9a5a"} Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.478589 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" event={"ID":"99daa308-0637-441e-86ae-8692e2155898","Type":"ContainerStarted","Data":"8e7e404b2e9f060f895086038b8e4dfd14b9252255d071bef84b6aa1c1220d8f"} Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.942901 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zg97f"] Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.945438 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.961488 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zg97f"] Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.996151 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfkd8\" (UniqueName: \"kubernetes.io/projected/deaf0a13-510b-445b-8dae-a60d6d385a8a-kube-api-access-xfkd8\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.996258 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-utilities\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:18 crc kubenswrapper[4925]: I0121 11:32:18.996429 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-catalog-content\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.097489 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfkd8\" (UniqueName: \"kubernetes.io/projected/deaf0a13-510b-445b-8dae-a60d6d385a8a-kube-api-access-xfkd8\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.098133 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-utilities\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.098292 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-catalog-content\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.099002 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-catalog-content\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.099794 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-utilities\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.130475 4925 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xfkd8\" (UniqueName: \"kubernetes.io/projected/deaf0a13-510b-445b-8dae-a60d6d385a8a-kube-api-access-xfkd8\") pod \"certified-operators-zg97f\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.265685 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.947144 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:32:19 crc kubenswrapper[4925]: I0121 11:32:19.947544 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.325240 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zg97f"] Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.613080 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="ceilometer-central-agent" containerID="cri-o://79cd498396ebdb8b92bf22e5c3ae82c82e37c89718e27758b9374f6d7fc20e41" gracePeriod=30 Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.613545 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerStarted","Data":"25e4fbc76c61f78bb5c658ecc092940f1810b1b0262c01b01a5e384b7e9dae04"} Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.613600 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.613921 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="ceilometer-notification-agent" containerID="cri-o://4207aaf13a3f4d4a7c6f6222b650f0496cb8dacdcc9d6b3541b4c40edba3d15f" gracePeriod=30 Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.613931 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="proxy-httpd" containerID="cri-o://25e4fbc76c61f78bb5c658ecc092940f1810b1b0262c01b01a5e384b7e9dae04" gracePeriod=30 Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.614009 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="sg-core" containerID="cri-o://ed433d149b82ada44852effca30d77ebf6c86d7c2c0c4bf3200a46dd26ac5181" gracePeriod=30 Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.626690 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" 
event={"ID":"99daa308-0637-441e-86ae-8692e2155898","Type":"ContainerStarted","Data":"3c2983054a582e5906ee1dc2d463270d5208b3ae3c645a4f34ed5d3c0833ebfb"} Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.641989 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-jzpjj" event={"ID":"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc","Type":"ContainerStarted","Data":"0704954ae6a2f2c6582908b25b084a33b6916e4418a9e6b9a54be7ea13cd3683"} Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.652999 4925 generic.go:334] "Generic (PLEG): container finished" podID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerID="99d34b739e2de0e15bf6874802f9497ff74a2f12b3c013395112189fe3f429fd" exitCode=0 Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.653059 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zg97f" event={"ID":"deaf0a13-510b-445b-8dae-a60d6d385a8a","Type":"ContainerDied","Data":"99d34b739e2de0e15bf6874802f9497ff74a2f12b3c013395112189fe3f429fd"} Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.653090 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zg97f" event={"ID":"deaf0a13-510b-445b-8dae-a60d6d385a8a","Type":"ContainerStarted","Data":"305d8b2a4da488ceee3cdbf94dea5b93b7d7955868fbd71a9b47499455988201"} Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.655635 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=4.556589693 podStartE2EDuration="8.655613918s" podCreationTimestamp="2026-01-21 11:32:12 +0000 UTC" firstStartedPulling="2026-01-21 11:32:13.676547052 +0000 UTC m=+2225.280438996" lastFinishedPulling="2026-01-21 11:32:17.775571287 +0000 UTC m=+2229.379463221" observedRunningTime="2026-01-21 11:32:20.644128406 +0000 UTC m=+2232.248020360" watchObservedRunningTime="2026-01-21 11:32:20.655613918 +0000 UTC m=+2232.259505852" Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.675050 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.676568 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:20 crc kubenswrapper[4925]: I0121 11:32:20.690929 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" podStartSLOduration=3.690903928 podStartE2EDuration="3.690903928s" podCreationTimestamp="2026-01-21 11:32:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:32:20.687517362 +0000 UTC m=+2232.291409306" watchObservedRunningTime="2026-01-21 11:32:20.690903928 +0000 UTC m=+2232.294795862" Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.734472 4925 generic.go:334] "Generic (PLEG): container finished" podID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerID="25e4fbc76c61f78bb5c658ecc092940f1810b1b0262c01b01a5e384b7e9dae04" exitCode=0 Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.734876 4925 generic.go:334] "Generic (PLEG): container finished" podID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerID="ed433d149b82ada44852effca30d77ebf6c86d7c2c0c4bf3200a46dd26ac5181" exitCode=2 Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.734892 4925 
generic.go:334] "Generic (PLEG): container finished" podID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerID="4207aaf13a3f4d4a7c6f6222b650f0496cb8dacdcc9d6b3541b4c40edba3d15f" exitCode=0 Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.734635 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerDied","Data":"25e4fbc76c61f78bb5c658ecc092940f1810b1b0262c01b01a5e384b7e9dae04"} Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.735015 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerDied","Data":"ed433d149b82ada44852effca30d77ebf6c86d7c2c0c4bf3200a46dd26ac5181"} Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.735045 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerDied","Data":"4207aaf13a3f4d4a7c6f6222b650f0496cb8dacdcc9d6b3541b4c40edba3d15f"} Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.742899 4925 generic.go:334] "Generic (PLEG): container finished" podID="99daa308-0637-441e-86ae-8692e2155898" containerID="3c2983054a582e5906ee1dc2d463270d5208b3ae3c645a4f34ed5d3c0833ebfb" exitCode=0 Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.742982 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" event={"ID":"99daa308-0637-441e-86ae-8692e2155898","Type":"ContainerDied","Data":"3c2983054a582e5906ee1dc2d463270d5208b3ae3c645a4f34ed5d3c0833ebfb"} Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.748241 4925 generic.go:334] "Generic (PLEG): container finished" podID="96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc" containerID="0704954ae6a2f2c6582908b25b084a33b6916e4418a9e6b9a54be7ea13cd3683" exitCode=0 Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.748295 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-jzpjj" event={"ID":"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc","Type":"ContainerDied","Data":"0704954ae6a2f2c6582908b25b084a33b6916e4418a9e6b9a54be7ea13cd3683"} Jan 21 11:32:21 crc kubenswrapper[4925]: I0121 11:32:21.793932 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q69zm" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="registry-server" probeResult="failure" output=< Jan 21 11:32:21 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:32:21 crc kubenswrapper[4925]: > Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.175851 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.326519 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-operator-scripts\") pod \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\" (UID: \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\") " Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.326665 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-999gx\" (UniqueName: \"kubernetes.io/projected/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-kube-api-access-999gx\") pod \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\" (UID: \"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc\") " Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.327974 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc" (UID: "96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.334207 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-kube-api-access-999gx" (OuterVolumeSpecName: "kube-api-access-999gx") pod "96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc" (UID: "96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc"). InnerVolumeSpecName "kube-api-access-999gx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.428732 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.428780 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-999gx\" (UniqueName: \"kubernetes.io/projected/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc-kube-api-access-999gx\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.759273 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-jzpjj" event={"ID":"96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc","Type":"ContainerDied","Data":"2da2ec890e77a105493569b7fff28aeafd352c3a564c79f216cb941b8a7d9a5a"} Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.759320 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2da2ec890e77a105493569b7fff28aeafd352c3a564c79f216cb941b8a7d9a5a" Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.759348 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-jzpjj" Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.769220 4925 generic.go:334] "Generic (PLEG): container finished" podID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerID="79cd498396ebdb8b92bf22e5c3ae82c82e37c89718e27758b9374f6d7fc20e41" exitCode=0 Jan 21 11:32:22 crc kubenswrapper[4925]: I0121 11:32:22.769503 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerDied","Data":"79cd498396ebdb8b92bf22e5c3ae82c82e37c89718e27758b9374f6d7fc20e41"} Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.332205 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.339288 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" event={"ID":"99daa308-0637-441e-86ae-8692e2155898","Type":"ContainerDied","Data":"8e7e404b2e9f060f895086038b8e4dfd14b9252255d071bef84b6aa1c1220d8f"} Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.339351 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e7e404b2e9f060f895086038b8e4dfd14b9252255d071bef84b6aa1c1220d8f" Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.475835 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7xjm\" (UniqueName: \"kubernetes.io/projected/99daa308-0637-441e-86ae-8692e2155898-kube-api-access-g7xjm\") pod \"99daa308-0637-441e-86ae-8692e2155898\" (UID: \"99daa308-0637-441e-86ae-8692e2155898\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.476205 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99daa308-0637-441e-86ae-8692e2155898-operator-scripts\") pod \"99daa308-0637-441e-86ae-8692e2155898\" (UID: \"99daa308-0637-441e-86ae-8692e2155898\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.477765 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99daa308-0637-441e-86ae-8692e2155898-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "99daa308-0637-441e-86ae-8692e2155898" (UID: "99daa308-0637-441e-86ae-8692e2155898"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.498766 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99daa308-0637-441e-86ae-8692e2155898-kube-api-access-g7xjm" (OuterVolumeSpecName: "kube-api-access-g7xjm") pod "99daa308-0637-441e-86ae-8692e2155898" (UID: "99daa308-0637-441e-86ae-8692e2155898"). InnerVolumeSpecName "kube-api-access-g7xjm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.584829 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7xjm\" (UniqueName: \"kubernetes.io/projected/99daa308-0637-441e-86ae-8692e2155898-kube-api-access-g7xjm\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.584858 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99daa308-0637-441e-86ae-8692e2155898-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.737599 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.894273 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-sg-core-conf-yaml\") pod \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.894386 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-config-data\") pod \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.894539 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-run-httpd\") pod \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.894753 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-ceilometer-tls-certs\") pod \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.894828 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-scripts\") pod \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.894923 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-combined-ca-bundle\") pod \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.894976 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-log-httpd\") pod \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " Jan 21 11:32:24 crc kubenswrapper[4925]: I0121 11:32:24.895022 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zw7lq\" (UniqueName: \"kubernetes.io/projected/9208753c-4e0c-4c99-abf2-b7005c2c81d4-kube-api-access-zw7lq\") pod 
\"9208753c-4e0c-4c99-abf2-b7005c2c81d4\" (UID: \"9208753c-4e0c-4c99-abf2-b7005c2c81d4\") " Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.038837 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9208753c-4e0c-4c99-abf2-b7005c2c81d4" (UID: "9208753c-4e0c-4c99-abf2-b7005c2c81d4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.038931 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9208753c-4e0c-4c99-abf2-b7005c2c81d4" (UID: "9208753c-4e0c-4c99-abf2-b7005c2c81d4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.042746 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-scripts" (OuterVolumeSpecName: "scripts") pod "9208753c-4e0c-4c99-abf2-b7005c2c81d4" (UID: "9208753c-4e0c-4c99-abf2-b7005c2c81d4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.052884 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9208753c-4e0c-4c99-abf2-b7005c2c81d4-kube-api-access-zw7lq" (OuterVolumeSpecName: "kube-api-access-zw7lq") pod "9208753c-4e0c-4c99-abf2-b7005c2c81d4" (UID: "9208753c-4e0c-4c99-abf2-b7005c2c81d4"). InnerVolumeSpecName "kube-api-access-zw7lq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.121607 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9208753c-4e0c-4c99-abf2-b7005c2c81d4" (UID: "9208753c-4e0c-4c99-abf2-b7005c2c81d4"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.145095 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.145486 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.145615 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zw7lq\" (UniqueName: \"kubernetes.io/projected/9208753c-4e0c-4c99-abf2-b7005c2c81d4-kube-api-access-zw7lq\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.145709 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.145793 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9208753c-4e0c-4c99-abf2-b7005c2c81d4-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.229702 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-config-data" (OuterVolumeSpecName: "config-data") pod "9208753c-4e0c-4c99-abf2-b7005c2c81d4" (UID: "9208753c-4e0c-4c99-abf2-b7005c2c81d4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.247592 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.255618 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9208753c-4e0c-4c99-abf2-b7005c2c81d4" (UID: "9208753c-4e0c-4c99-abf2-b7005c2c81d4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.281444 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "9208753c-4e0c-4c99-abf2-b7005c2c81d4" (UID: "9208753c-4e0c-4c99-abf2-b7005c2c81d4"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.349606 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.349880 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9208753c-4e0c-4c99-abf2-b7005c2c81d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.372605 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"9208753c-4e0c-4c99-abf2-b7005c2c81d4","Type":"ContainerDied","Data":"a5deb341a8850a95e358ae06e19408896850ecad0a9a43a234dc380c6b84d216"} Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.372696 4925 scope.go:117] "RemoveContainer" containerID="25e4fbc76c61f78bb5c658ecc092940f1810b1b0262c01b01a5e384b7e9dae04" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.372962 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.413103 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-50a1-account-create-update-h44lh" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.415047 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zg97f" event={"ID":"deaf0a13-510b-445b-8dae-a60d6d385a8a","Type":"ContainerStarted","Data":"4e80dc3ec843089df0f6462fad6a150737d97415cebebb73eb3a5ff22ffb1cef"} Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.442661 4925 scope.go:117] "RemoveContainer" containerID="ed433d149b82ada44852effca30d77ebf6c86d7c2c0c4bf3200a46dd26ac5181" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.465504 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.478667 4925 scope.go:117] "RemoveContainer" containerID="4207aaf13a3f4d4a7c6f6222b650f0496cb8dacdcc9d6b3541b4c40edba3d15f" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.495556 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.536608 4925 scope.go:117] "RemoveContainer" containerID="79cd498396ebdb8b92bf22e5c3ae82c82e37c89718e27758b9374f6d7fc20e41" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.552217 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" path="/var/lib/kubelet/pods/9208753c-4e0c-4c99-abf2-b7005c2c81d4/volumes" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.556931 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:25 crc kubenswrapper[4925]: E0121 11:32:25.557370 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="sg-core" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.737218 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="sg-core" Jan 21 11:32:25 crc kubenswrapper[4925]: E0121 11:32:25.737676 4925 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="ceilometer-central-agent" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.737797 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="ceilometer-central-agent" Jan 21 11:32:25 crc kubenswrapper[4925]: E0121 11:32:25.737904 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="proxy-httpd" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.737986 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="proxy-httpd" Jan 21 11:32:25 crc kubenswrapper[4925]: E0121 11:32:25.738063 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99daa308-0637-441e-86ae-8692e2155898" containerName="mariadb-account-create-update" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.738265 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="99daa308-0637-441e-86ae-8692e2155898" containerName="mariadb-account-create-update" Jan 21 11:32:25 crc kubenswrapper[4925]: E0121 11:32:25.738370 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc" containerName="mariadb-database-create" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.738448 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc" containerName="mariadb-database-create" Jan 21 11:32:25 crc kubenswrapper[4925]: E0121 11:32:25.738540 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="ceilometer-notification-agent" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.738627 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="ceilometer-notification-agent" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.739132 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="proxy-httpd" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.739229 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="sg-core" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.739298 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="ceilometer-central-agent" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.739362 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc" containerName="mariadb-database-create" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.739450 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9208753c-4e0c-4c99-abf2-b7005c2c81d4" containerName="ceilometer-notification-agent" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.739527 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="99daa308-0637-441e-86ae-8692e2155898" containerName="mariadb-account-create-update" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.741518 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.752069 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.752339 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.752480 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.797465 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.842672 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-scripts\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.842724 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kshmh\" (UniqueName: \"kubernetes.io/projected/c5109392-5870-4911-b674-2f78cf27c0ca-kube-api-access-kshmh\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.842789 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-log-httpd\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.842815 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.842847 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-run-httpd\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.842915 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.842938 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.842977 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-config-data\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.945893 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.945959 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.946021 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-config-data\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.946096 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-scripts\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.946118 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kshmh\" (UniqueName: \"kubernetes.io/projected/c5109392-5870-4911-b674-2f78cf27c0ca-kube-api-access-kshmh\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.946182 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-log-httpd\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.946200 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.946227 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-run-httpd\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.946821 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-run-httpd\") pod \"ceilometer-0\" (UID: 
\"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.947438 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-log-httpd\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.952556 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.957533 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-scripts\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.957649 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.958668 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-config-data\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.972686 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kshmh\" (UniqueName: \"kubernetes.io/projected/c5109392-5870-4911-b674-2f78cf27c0ca-kube-api-access-kshmh\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:25 crc kubenswrapper[4925]: I0121 11:32:25.980625 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:26 crc kubenswrapper[4925]: I0121 11:32:26.142176 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:26 crc kubenswrapper[4925]: I0121 11:32:26.425935 4925 generic.go:334] "Generic (PLEG): container finished" podID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerID="4e80dc3ec843089df0f6462fad6a150737d97415cebebb73eb3a5ff22ffb1cef" exitCode=0 Jan 21 11:32:26 crc kubenswrapper[4925]: I0121 11:32:26.426170 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zg97f" event={"ID":"deaf0a13-510b-445b-8dae-a60d6d385a8a","Type":"ContainerDied","Data":"4e80dc3ec843089df0f6462fad6a150737d97415cebebb73eb3a5ff22ffb1cef"} Jan 21 11:32:26 crc kubenswrapper[4925]: I0121 11:32:26.681825 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:26 crc kubenswrapper[4925]: W0121 11:32:26.688126 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5109392_5870_4911_b674_2f78cf27c0ca.slice/crio-896c47a69c7c8978b6540ee3e4fafb0e13cf0b01bbe0abe8971dcbdb81375464 WatchSource:0}: Error finding container 896c47a69c7c8978b6540ee3e4fafb0e13cf0b01bbe0abe8971dcbdb81375464: Status 404 returned error can't find the container with id 896c47a69c7c8978b6540ee3e4fafb0e13cf0b01bbe0abe8971dcbdb81375464 Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.441212 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zg97f" event={"ID":"deaf0a13-510b-445b-8dae-a60d6d385a8a","Type":"ContainerStarted","Data":"2eda51430ab37ae670085739359b9479998297eede92ceeb577a689345e93a29"} Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.443266 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerStarted","Data":"896c47a69c7c8978b6540ee3e4fafb0e13cf0b01bbe0abe8971dcbdb81375464"} Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.560426 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-497rl"] Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.562029 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.569196 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.570946 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-gvjl6" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.576950 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-config-data\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.576997 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtw47\" (UniqueName: \"kubernetes.io/projected/0742dce7-0bc4-4c67-a1fa-abfa1921d449-kube-api-access-gtw47\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.577447 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-db-sync-config-data\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.577656 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.587134 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-497rl"] Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.681384 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.681497 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-config-data\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.681519 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtw47\" (UniqueName: \"kubernetes.io/projected/0742dce7-0bc4-4c67-a1fa-abfa1921d449-kube-api-access-gtw47\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc 
kubenswrapper[4925]: I0121 11:32:27.681633 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-db-sync-config-data\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.689817 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-db-sync-config-data\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.689966 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-config-data\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.690760 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.703289 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtw47\" (UniqueName: \"kubernetes.io/projected/0742dce7-0bc4-4c67-a1fa-abfa1921d449-kube-api-access-gtw47\") pod \"watcher-kuttl-db-sync-497rl\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:27 crc kubenswrapper[4925]: I0121 11:32:27.797362 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:28 crc kubenswrapper[4925]: I0121 11:32:28.310882 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-497rl"] Jan 21 11:32:28 crc kubenswrapper[4925]: I0121 11:32:28.602164 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" event={"ID":"0742dce7-0bc4-4c67-a1fa-abfa1921d449","Type":"ContainerStarted","Data":"ce019433304c1d318c6bf373367d5c42e9200351f80dc52095f137b78dc0a8e8"} Jan 21 11:32:28 crc kubenswrapper[4925]: I0121 11:32:28.613094 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerStarted","Data":"60e05be369a70403b1d12d1d0a3a825a6c31d501fde6f57728c19646e856e254"} Jan 21 11:32:28 crc kubenswrapper[4925]: I0121 11:32:28.681211 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zg97f" podStartSLOduration=4.289306616 podStartE2EDuration="10.68117752s" podCreationTimestamp="2026-01-21 11:32:18 +0000 UTC" firstStartedPulling="2026-01-21 11:32:20.656722612 +0000 UTC m=+2232.260614546" lastFinishedPulling="2026-01-21 11:32:27.048593516 +0000 UTC m=+2238.652485450" observedRunningTime="2026-01-21 11:32:28.670351739 +0000 UTC m=+2240.274243673" watchObservedRunningTime="2026-01-21 11:32:28.68117752 +0000 UTC m=+2240.285069454" Jan 21 11:32:29 crc kubenswrapper[4925]: I0121 11:32:29.331583 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:29 crc kubenswrapper[4925]: I0121 11:32:29.332752 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:29 crc kubenswrapper[4925]: I0121 11:32:29.622256 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerStarted","Data":"a1b44f02fed53e80bc0bf82fd6439fbd7535569ff5d8e46d3e83e189fe585009"} Jan 21 11:32:29 crc kubenswrapper[4925]: I0121 11:32:29.624691 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" event={"ID":"0742dce7-0bc4-4c67-a1fa-abfa1921d449","Type":"ContainerStarted","Data":"9304c06a63c95329cfd0850ab0a95543341e741c4dfd34929e328f10293c90dd"} Jan 21 11:32:29 crc kubenswrapper[4925]: I0121 11:32:29.663782 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" podStartSLOduration=2.663759024 podStartE2EDuration="2.663759024s" podCreationTimestamp="2026-01-21 11:32:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:32:29.661271856 +0000 UTC m=+2241.265163790" watchObservedRunningTime="2026-01-21 11:32:29.663759024 +0000 UTC m=+2241.267650978" Jan 21 11:32:30 crc kubenswrapper[4925]: I0121 11:32:30.390623 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-zg97f" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="registry-server" probeResult="failure" output=< Jan 21 11:32:30 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:32:30 crc kubenswrapper[4925]: > Jan 21 
11:32:30 crc kubenswrapper[4925]: I0121 11:32:30.638755 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerStarted","Data":"30cabfecac6697e249e5976f11acaf774fbe53c2c1bb8872fe3b983bbca1c3cf"} Jan 21 11:32:30 crc kubenswrapper[4925]: I0121 11:32:30.887970 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:30 crc kubenswrapper[4925]: I0121 11:32:30.939793 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:32 crc kubenswrapper[4925]: I0121 11:32:32.868865 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerStarted","Data":"8936c9a16c3fb1847956140594253afafdb741f4644a4c4a45ddb1737a966c69"} Jan 21 11:32:32 crc kubenswrapper[4925]: I0121 11:32:32.869316 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:32 crc kubenswrapper[4925]: I0121 11:32:32.897657 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.305045045 podStartE2EDuration="7.897631104s" podCreationTimestamp="2026-01-21 11:32:25 +0000 UTC" firstStartedPulling="2026-01-21 11:32:26.6902959 +0000 UTC m=+2238.294187834" lastFinishedPulling="2026-01-21 11:32:32.282881959 +0000 UTC m=+2243.886773893" observedRunningTime="2026-01-21 11:32:32.892505152 +0000 UTC m=+2244.496397096" watchObservedRunningTime="2026-01-21 11:32:32.897631104 +0000 UTC m=+2244.501523038" Jan 21 11:32:34 crc kubenswrapper[4925]: I0121 11:32:34.928137 4925 generic.go:334] "Generic (PLEG): container finished" podID="0742dce7-0bc4-4c67-a1fa-abfa1921d449" containerID="9304c06a63c95329cfd0850ab0a95543341e741c4dfd34929e328f10293c90dd" exitCode=0 Jan 21 11:32:34 crc kubenswrapper[4925]: I0121 11:32:34.928482 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" event={"ID":"0742dce7-0bc4-4c67-a1fa-abfa1921d449","Type":"ContainerDied","Data":"9304c06a63c95329cfd0850ab0a95543341e741c4dfd34929e328f10293c90dd"} Jan 21 11:32:35 crc kubenswrapper[4925]: I0121 11:32:35.329988 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q69zm"] Jan 21 11:32:35 crc kubenswrapper[4925]: I0121 11:32:35.331113 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q69zm" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="registry-server" containerID="cri-o://d900d011de833f5bd19fa9a34cdd7a53efcd435de1eee98f99f5bb72b1b7033e" gracePeriod=2 Jan 21 11:32:35 crc kubenswrapper[4925]: I0121 11:32:35.944921 4925 generic.go:334] "Generic (PLEG): container finished" podID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerID="d900d011de833f5bd19fa9a34cdd7a53efcd435de1eee98f99f5bb72b1b7033e" exitCode=0 Jan 21 11:32:35 crc kubenswrapper[4925]: I0121 11:32:35.945778 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q69zm" event={"ID":"644e5583-d8a8-4351-af74-7d72252f2e9a","Type":"ContainerDied","Data":"d900d011de833f5bd19fa9a34cdd7a53efcd435de1eee98f99f5bb72b1b7033e"} Jan 21 11:32:35 crc kubenswrapper[4925]: I0121 11:32:35.945846 4925 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q69zm" event={"ID":"644e5583-d8a8-4351-af74-7d72252f2e9a","Type":"ContainerDied","Data":"7ddf5662a41e7c88fb34d98be5e80565fd7826adc5c84fac7f4a423fe71c3f89"} Jan 21 11:32:35 crc kubenswrapper[4925]: I0121 11:32:35.945867 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ddf5662a41e7c88fb34d98be5e80565fd7826adc5c84fac7f4a423fe71c3f89" Jan 21 11:32:35 crc kubenswrapper[4925]: I0121 11:32:35.961212 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.088852 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-utilities\") pod \"644e5583-d8a8-4351-af74-7d72252f2e9a\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.088920 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-catalog-content\") pod \"644e5583-d8a8-4351-af74-7d72252f2e9a\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.088992 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zc4c\" (UniqueName: \"kubernetes.io/projected/644e5583-d8a8-4351-af74-7d72252f2e9a-kube-api-access-5zc4c\") pod \"644e5583-d8a8-4351-af74-7d72252f2e9a\" (UID: \"644e5583-d8a8-4351-af74-7d72252f2e9a\") " Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.091004 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-utilities" (OuterVolumeSpecName: "utilities") pod "644e5583-d8a8-4351-af74-7d72252f2e9a" (UID: "644e5583-d8a8-4351-af74-7d72252f2e9a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.099951 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/644e5583-d8a8-4351-af74-7d72252f2e9a-kube-api-access-5zc4c" (OuterVolumeSpecName: "kube-api-access-5zc4c") pod "644e5583-d8a8-4351-af74-7d72252f2e9a" (UID: "644e5583-d8a8-4351-af74-7d72252f2e9a"). InnerVolumeSpecName "kube-api-access-5zc4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.202736 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.202794 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zc4c\" (UniqueName: \"kubernetes.io/projected/644e5583-d8a8-4351-af74-7d72252f2e9a-kube-api-access-5zc4c\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.277461 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "644e5583-d8a8-4351-af74-7d72252f2e9a" (UID: "644e5583-d8a8-4351-af74-7d72252f2e9a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.308424 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/644e5583-d8a8-4351-af74-7d72252f2e9a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.372098 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.410322 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-config-data\") pod \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.410629 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtw47\" (UniqueName: \"kubernetes.io/projected/0742dce7-0bc4-4c67-a1fa-abfa1921d449-kube-api-access-gtw47\") pod \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.410704 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-db-sync-config-data\") pod \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.410801 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-combined-ca-bundle\") pod \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\" (UID: \"0742dce7-0bc4-4c67-a1fa-abfa1921d449\") " Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.417857 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0742dce7-0bc4-4c67-a1fa-abfa1921d449-kube-api-access-gtw47" (OuterVolumeSpecName: "kube-api-access-gtw47") pod "0742dce7-0bc4-4c67-a1fa-abfa1921d449" (UID: "0742dce7-0bc4-4c67-a1fa-abfa1921d449"). InnerVolumeSpecName "kube-api-access-gtw47". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.422039 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0742dce7-0bc4-4c67-a1fa-abfa1921d449" (UID: "0742dce7-0bc4-4c67-a1fa-abfa1921d449"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.438769 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0742dce7-0bc4-4c67-a1fa-abfa1921d449" (UID: "0742dce7-0bc4-4c67-a1fa-abfa1921d449"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.485643 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-config-data" (OuterVolumeSpecName: "config-data") pod "0742dce7-0bc4-4c67-a1fa-abfa1921d449" (UID: "0742dce7-0bc4-4c67-a1fa-abfa1921d449"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.518568 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.518604 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.518632 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtw47\" (UniqueName: \"kubernetes.io/projected/0742dce7-0bc4-4c67-a1fa-abfa1921d449-kube-api-access-gtw47\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.518646 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0742dce7-0bc4-4c67-a1fa-abfa1921d449-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.956140 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q69zm" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.959503 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.959887 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-497rl" event={"ID":"0742dce7-0bc4-4c67-a1fa-abfa1921d449","Type":"ContainerDied","Data":"ce019433304c1d318c6bf373367d5c42e9200351f80dc52095f137b78dc0a8e8"} Jan 21 11:32:36 crc kubenswrapper[4925]: I0121 11:32:36.960046 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce019433304c1d318c6bf373367d5c42e9200351f80dc52095f137b78dc0a8e8" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.014502 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q69zm"] Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.020422 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q69zm"] Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.242996 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:32:37 crc kubenswrapper[4925]: E0121 11:32:37.243551 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="extract-content" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.243578 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="extract-content" Jan 21 11:32:37 crc kubenswrapper[4925]: E0121 11:32:37.243600 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="extract-utilities" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.243608 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="extract-utilities" Jan 21 11:32:37 crc kubenswrapper[4925]: E0121 11:32:37.243621 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0742dce7-0bc4-4c67-a1fa-abfa1921d449" containerName="watcher-kuttl-db-sync" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.243633 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0742dce7-0bc4-4c67-a1fa-abfa1921d449" containerName="watcher-kuttl-db-sync" Jan 21 11:32:37 crc kubenswrapper[4925]: E0121 11:32:37.243661 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="registry-server" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.243669 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="registry-server" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.243866 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0742dce7-0bc4-4c67-a1fa-abfa1921d449" containerName="watcher-kuttl-db-sync" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.243897 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" containerName="registry-server" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.245163 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.249575 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-gvjl6" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.249780 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.278765 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.335426 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.335743 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.335860 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.336083 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8ab333d-b735-413d-a903-c8a215353127-logs\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.336208 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2nkg\" (UniqueName: \"kubernetes.io/projected/e8ab333d-b735-413d-a903-c8a215353127-kube-api-access-v2nkg\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.336265 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.386478 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.387743 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.393207 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.403715 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.443178 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.443287 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.443341 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.443485 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8ab333d-b735-413d-a903-c8a215353127-logs\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.443532 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2nkg\" (UniqueName: \"kubernetes.io/projected/e8ab333d-b735-413d-a903-c8a215353127-kube-api-access-v2nkg\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.443566 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.447839 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.448185 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8ab333d-b735-413d-a903-c8a215353127-logs\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.453293 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.454641 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.456257 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.457717 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.462694 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.466030 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.490650 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2nkg\" (UniqueName: \"kubernetes.io/projected/e8ab333d-b735-413d-a903-c8a215353127-kube-api-access-v2nkg\") pod \"watcher-kuttl-api-0\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.491564 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.548552 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hfnz\" (UniqueName: \"kubernetes.io/projected/9346c552-eed6-447d-aa74-db8a61311a9b-kube-api-access-5hfnz\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.549024 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.549779 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.549844 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n28tq\" (UniqueName: \"kubernetes.io/projected/915b5d82-f42c-4046-9e7e-2581a6979377-kube-api-access-n28tq\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.549874 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.549908 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.549960 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9346c552-eed6-447d-aa74-db8a61311a9b-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.550053 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.550138 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.550234 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.550286 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/915b5d82-f42c-4046-9e7e-2581a6979377-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.557734 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="644e5583-d8a8-4351-af74-7d72252f2e9a" path="/var/lib/kubelet/pods/644e5583-d8a8-4351-af74-7d72252f2e9a/volumes" Jan 21 11:32:37 crc 
kubenswrapper[4925]: I0121 11:32:37.573536 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.651941 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.652385 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.652562 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/915b5d82-f42c-4046-9e7e-2581a6979377-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.652811 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hfnz\" (UniqueName: \"kubernetes.io/projected/9346c552-eed6-447d-aa74-db8a61311a9b-kube-api-access-5hfnz\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.652994 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.653135 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.653279 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n28tq\" (UniqueName: \"kubernetes.io/projected/915b5d82-f42c-4046-9e7e-2581a6979377-kube-api-access-n28tq\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.653417 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.653529 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: 
\"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.653641 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9346c552-eed6-447d-aa74-db8a61311a9b-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.653781 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.657349 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.660879 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.661057 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.661204 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9346c552-eed6-447d-aa74-db8a61311a9b-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.664864 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/915b5d82-f42c-4046-9e7e-2581a6979377-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.668189 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.668828 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.670640 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.671005 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.682278 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n28tq\" (UniqueName: \"kubernetes.io/projected/915b5d82-f42c-4046-9e7e-2581a6979377-kube-api-access-n28tq\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.695382 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hfnz\" (UniqueName: \"kubernetes.io/projected/9346c552-eed6-447d-aa74-db8a61311a9b-kube-api-access-5hfnz\") pod \"watcher-kuttl-applier-0\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.736885 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:37 crc kubenswrapper[4925]: I0121 11:32:37.867378 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:38 crc kubenswrapper[4925]: I0121 11:32:38.349064 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:32:38 crc kubenswrapper[4925]: I0121 11:32:38.573151 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:32:38 crc kubenswrapper[4925]: W0121 11:32:38.575803 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod915b5d82_f42c_4046_9e7e_2581a6979377.slice/crio-5b5765359829282e3055260cccbb76ef1f1bd3dbf1bc4e47621ed9a8bd7e84d7 WatchSource:0}: Error finding container 5b5765359829282e3055260cccbb76ef1f1bd3dbf1bc4e47621ed9a8bd7e84d7: Status 404 returned error can't find the container with id 5b5765359829282e3055260cccbb76ef1f1bd3dbf1bc4e47621ed9a8bd7e84d7 Jan 21 11:32:38 crc kubenswrapper[4925]: I0121 11:32:38.638267 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.162059 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"e8ab333d-b735-413d-a903-c8a215353127","Type":"ContainerStarted","Data":"e7b7f1d24deaf19f6642fd47c1c4d5ce271a668f758b8d0664a02194010895f4"} Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.162453 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"e8ab333d-b735-413d-a903-c8a215353127","Type":"ContainerStarted","Data":"e003e47f4e073c6511a2c5833d0639ee8690b5bdb3e27339f49ffc842d7e7e88"} Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.162479 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"e8ab333d-b735-413d-a903-c8a215353127","Type":"ContainerStarted","Data":"a3ce428660d973ac1efdfdb620ea84f4a0c1b5e86437f24d11ce4a1898fd8dfa"} Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.162913 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.168586 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"915b5d82-f42c-4046-9e7e-2581a6979377","Type":"ContainerStarted","Data":"4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd"} Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.168632 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"915b5d82-f42c-4046-9e7e-2581a6979377","Type":"ContainerStarted","Data":"5b5765359829282e3055260cccbb76ef1f1bd3dbf1bc4e47621ed9a8bd7e84d7"} Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.192755 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"9346c552-eed6-447d-aa74-db8a61311a9b","Type":"ContainerStarted","Data":"bc4daa0c2bfb0deb5b6b2ffa8f5dd788f641851c6ce11ebfe89576a7d34d79e1"} Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.192817 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" 
event={"ID":"9346c552-eed6-447d-aa74-db8a61311a9b","Type":"ContainerStarted","Data":"6481fb0f153001471a92d02eec09bf66605a11011a32fa7e48d1a1e3d6a9c5e8"} Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.212928 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=2.2129023820000002 podStartE2EDuration="2.212902382s" podCreationTimestamp="2026-01-21 11:32:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:32:39.208776823 +0000 UTC m=+2250.812668777" watchObservedRunningTime="2026-01-21 11:32:39.212902382 +0000 UTC m=+2250.816794336" Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.240979 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=2.240955515 podStartE2EDuration="2.240955515s" podCreationTimestamp="2026-01-21 11:32:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:32:39.232351304 +0000 UTC m=+2250.836243248" watchObservedRunningTime="2026-01-21 11:32:39.240955515 +0000 UTC m=+2250.844847449" Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.260031 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.260006365 podStartE2EDuration="2.260006365s" podCreationTimestamp="2026-01-21 11:32:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:32:39.253537682 +0000 UTC m=+2250.857429626" watchObservedRunningTime="2026-01-21 11:32:39.260006365 +0000 UTC m=+2250.863898299" Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.319279 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.324201 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:39 crc kubenswrapper[4925]: I0121 11:32:39.397995 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:40 crc kubenswrapper[4925]: I0121 11:32:40.545773 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:41 crc kubenswrapper[4925]: I0121 11:32:41.781894 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:42 crc kubenswrapper[4925]: I0121 11:32:42.322007 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:42 crc kubenswrapper[4925]: I0121 11:32:42.576856 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:42 crc kubenswrapper[4925]: I0121 11:32:42.738743 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:43 crc kubenswrapper[4925]: I0121 11:32:43.013594 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:43 crc kubenswrapper[4925]: I0121 11:32:43.800454 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zg97f"] Jan 21 11:32:43 crc kubenswrapper[4925]: I0121 11:32:43.800868 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zg97f" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="registry-server" containerID="cri-o://2eda51430ab37ae670085739359b9479998297eede92ceeb577a689345e93a29" gracePeriod=2 Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.321170 4925 generic.go:334] "Generic (PLEG): container finished" podID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerID="2eda51430ab37ae670085739359b9479998297eede92ceeb577a689345e93a29" exitCode=0 Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.321532 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zg97f" event={"ID":"deaf0a13-510b-445b-8dae-a60d6d385a8a","Type":"ContainerDied","Data":"2eda51430ab37ae670085739359b9479998297eede92ceeb577a689345e93a29"} Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.340779 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.471141 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.607459 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-catalog-content\") pod \"deaf0a13-510b-445b-8dae-a60d6d385a8a\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.607888 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfkd8\" (UniqueName: \"kubernetes.io/projected/deaf0a13-510b-445b-8dae-a60d6d385a8a-kube-api-access-xfkd8\") pod \"deaf0a13-510b-445b-8dae-a60d6d385a8a\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.607985 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-utilities\") pod \"deaf0a13-510b-445b-8dae-a60d6d385a8a\" (UID: \"deaf0a13-510b-445b-8dae-a60d6d385a8a\") " Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.610760 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-utilities" (OuterVolumeSpecName: "utilities") pod "deaf0a13-510b-445b-8dae-a60d6d385a8a" (UID: "deaf0a13-510b-445b-8dae-a60d6d385a8a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.621886 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/deaf0a13-510b-445b-8dae-a60d6d385a8a-kube-api-access-xfkd8" (OuterVolumeSpecName: "kube-api-access-xfkd8") pod "deaf0a13-510b-445b-8dae-a60d6d385a8a" (UID: "deaf0a13-510b-445b-8dae-a60d6d385a8a"). InnerVolumeSpecName "kube-api-access-xfkd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.691358 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "deaf0a13-510b-445b-8dae-a60d6d385a8a" (UID: "deaf0a13-510b-445b-8dae-a60d6d385a8a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.710014 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.710064 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfkd8\" (UniqueName: \"kubernetes.io/projected/deaf0a13-510b-445b-8dae-a60d6d385a8a-kube-api-access-xfkd8\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:44 crc kubenswrapper[4925]: I0121 11:32:44.710079 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deaf0a13-510b-445b-8dae-a60d6d385a8a-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.336764 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zg97f" event={"ID":"deaf0a13-510b-445b-8dae-a60d6d385a8a","Type":"ContainerDied","Data":"305d8b2a4da488ceee3cdbf94dea5b93b7d7955868fbd71a9b47499455988201"} Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.336854 4925 scope.go:117] "RemoveContainer" containerID="2eda51430ab37ae670085739359b9479998297eede92ceeb577a689345e93a29" Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.337069 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zg97f" Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.363533 4925 scope.go:117] "RemoveContainer" containerID="4e80dc3ec843089df0f6462fad6a150737d97415cebebb73eb3a5ff22ffb1cef" Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.384076 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zg97f"] Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.385847 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zg97f"] Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.397920 4925 scope.go:117] "RemoveContainer" containerID="99d34b739e2de0e15bf6874802f9497ff74a2f12b3c013395112189fe3f429fd" Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.515611 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" path="/var/lib/kubelet/pods/deaf0a13-510b-445b-8dae-a60d6d385a8a/volumes" Jan 21 11:32:45 crc kubenswrapper[4925]: I0121 11:32:45.608740 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:46 crc kubenswrapper[4925]: I0121 11:32:46.838083 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:47 crc kubenswrapper[4925]: I0121 11:32:47.574713 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:47 crc kubenswrapper[4925]: I0121 11:32:47.591845 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:47 crc kubenswrapper[4925]: I0121 11:32:47.738545 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:47 crc kubenswrapper[4925]: I0121 11:32:47.767326 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:47 crc kubenswrapper[4925]: I0121 11:32:47.868902 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:47 crc kubenswrapper[4925]: I0121 11:32:47.896474 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.079690 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.400199 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.408857 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.431135 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 
11:32:48.431221 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.755195 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6c57t"] Jan 21 11:32:48 crc kubenswrapper[4925]: E0121 11:32:48.756375 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="extract-utilities" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.756823 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="extract-utilities" Jan 21 11:32:48 crc kubenswrapper[4925]: E0121 11:32:48.756947 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="registry-server" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.757046 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="registry-server" Jan 21 11:32:48 crc kubenswrapper[4925]: E0121 11:32:48.757185 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="extract-content" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.757327 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="extract-content" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.757794 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="deaf0a13-510b-445b-8dae-a60d6d385a8a" containerName="registry-server" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.759614 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.786367 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6c57t"] Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.921667 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg6wg\" (UniqueName: \"kubernetes.io/projected/87aa630b-73cc-4364-99bd-f6cd204056ef-kube-api-access-sg6wg\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.921812 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-catalog-content\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:48 crc kubenswrapper[4925]: I0121 11:32:48.921878 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-utilities\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:49 crc kubenswrapper[4925]: I0121 11:32:49.023301 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg6wg\" (UniqueName: \"kubernetes.io/projected/87aa630b-73cc-4364-99bd-f6cd204056ef-kube-api-access-sg6wg\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:49 crc kubenswrapper[4925]: I0121 11:32:49.023431 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-catalog-content\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:49 crc kubenswrapper[4925]: I0121 11:32:49.023474 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-utilities\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:49 crc kubenswrapper[4925]: I0121 11:32:49.024080 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-utilities\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:49 crc kubenswrapper[4925]: I0121 11:32:49.024136 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-catalog-content\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:49 crc kubenswrapper[4925]: I0121 11:32:49.057744 4925 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-sg6wg\" (UniqueName: \"kubernetes.io/projected/87aa630b-73cc-4364-99bd-f6cd204056ef-kube-api-access-sg6wg\") pod \"community-operators-6c57t\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:49 crc kubenswrapper[4925]: I0121 11:32:49.082942 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:49.337571 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:49.641653 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6c57t"] Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:49.680941 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:49.941615 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:49.941675 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:49.941726 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:49.942553 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"14144a36600bd7d5b9a71777ea7bcad1b2af7e52667e89f48ae846cc78fbbc2d"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:49.942609 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://14144a36600bd7d5b9a71777ea7bcad1b2af7e52667e89f48ae846cc78fbbc2d" gracePeriod=600 Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.138595 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-db-create-4fv8w"] Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.141503 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.154498 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-f1be-account-create-update-hffks"] Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.156378 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.163943 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-db-create-4fv8w"] Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.164251 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-db-secret" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.165144 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqr72\" (UniqueName: \"kubernetes.io/projected/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-kube-api-access-dqr72\") pod \"cinder-db-create-4fv8w\" (UID: \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\") " pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.165207 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2164931-e4db-447d-9821-118982670bdc-operator-scripts\") pod \"cinder-f1be-account-create-update-hffks\" (UID: \"b2164931-e4db-447d-9821-118982670bdc\") " pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.165311 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h85zn\" (UniqueName: \"kubernetes.io/projected/b2164931-e4db-447d-9821-118982670bdc-kube-api-access-h85zn\") pod \"cinder-f1be-account-create-update-hffks\" (UID: \"b2164931-e4db-447d-9821-118982670bdc\") " pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.165410 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-operator-scripts\") pod \"cinder-db-create-4fv8w\" (UID: \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\") " pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.184076 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-f1be-account-create-update-hffks"] Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.270199 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h85zn\" (UniqueName: \"kubernetes.io/projected/b2164931-e4db-447d-9821-118982670bdc-kube-api-access-h85zn\") pod \"cinder-f1be-account-create-update-hffks\" (UID: \"b2164931-e4db-447d-9821-118982670bdc\") " pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.270317 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-operator-scripts\") pod \"cinder-db-create-4fv8w\" (UID: \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\") " 
pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.270507 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqr72\" (UniqueName: \"kubernetes.io/projected/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-kube-api-access-dqr72\") pod \"cinder-db-create-4fv8w\" (UID: \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\") " pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.270544 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2164931-e4db-447d-9821-118982670bdc-operator-scripts\") pod \"cinder-f1be-account-create-update-hffks\" (UID: \"b2164931-e4db-447d-9821-118982670bdc\") " pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.271326 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-operator-scripts\") pod \"cinder-db-create-4fv8w\" (UID: \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\") " pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.272053 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2164931-e4db-447d-9821-118982670bdc-operator-scripts\") pod \"cinder-f1be-account-create-update-hffks\" (UID: \"b2164931-e4db-447d-9821-118982670bdc\") " pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.295968 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h85zn\" (UniqueName: \"kubernetes.io/projected/b2164931-e4db-447d-9821-118982670bdc-kube-api-access-h85zn\") pod \"cinder-f1be-account-create-update-hffks\" (UID: \"b2164931-e4db-447d-9821-118982670bdc\") " pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.297705 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqr72\" (UniqueName: \"kubernetes.io/projected/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-kube-api-access-dqr72\") pod \"cinder-db-create-4fv8w\" (UID: \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\") " pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.431879 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="14144a36600bd7d5b9a71777ea7bcad1b2af7e52667e89f48ae846cc78fbbc2d" exitCode=0 Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.431920 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"14144a36600bd7d5b9a71777ea7bcad1b2af7e52667e89f48ae846cc78fbbc2d"} Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.431993 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e"} Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.432015 4925 scope.go:117] "RemoveContainer" 
containerID="50b5f6a9960ebd0016f529b4e13788c8c44ae0b33ac0270b386d5f3128055f73" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.435999 4925 generic.go:334] "Generic (PLEG): container finished" podID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerID="299d857f7f79194b648b9d2f67fe41e5578d9d31ba699ce06cb0af5cddb9de0d" exitCode=0 Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.436956 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c57t" event={"ID":"87aa630b-73cc-4364-99bd-f6cd204056ef","Type":"ContainerDied","Data":"299d857f7f79194b648b9d2f67fe41e5578d9d31ba699ce06cb0af5cddb9de0d"} Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.437005 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c57t" event={"ID":"87aa630b-73cc-4364-99bd-f6cd204056ef","Type":"ContainerStarted","Data":"e3264e42f6915a0e71121f41ba4f7852d4c7c13cfdaba9410357ba1eeea192f7"} Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.482913 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.543717 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:50 crc kubenswrapper[4925]: I0121 11:32:50.898458 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:51 crc kubenswrapper[4925]: I0121 11:32:51.125203 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-db-create-4fv8w"] Jan 21 11:32:51 crc kubenswrapper[4925]: W0121 11:32:51.159012 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c98b6c2_3cd8_4c48_8142_de9d6e11be9c.slice/crio-d1576fd36be5d60177398d6bb33d18e7b66e4c0d68195ee06050c777a528234d WatchSource:0}: Error finding container d1576fd36be5d60177398d6bb33d18e7b66e4c0d68195ee06050c777a528234d: Status 404 returned error can't find the container with id d1576fd36be5d60177398d6bb33d18e7b66e4c0d68195ee06050c777a528234d Jan 21 11:32:51 crc kubenswrapper[4925]: I0121 11:32:51.234757 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-f1be-account-create-update-hffks"] Jan 21 11:32:51 crc kubenswrapper[4925]: W0121 11:32:51.249532 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2164931_e4db_447d_9821_118982670bdc.slice/crio-35c682a483c35619f5218ce12dfd8c03563e5203993673154a369ddb92e41065 WatchSource:0}: Error finding container 35c682a483c35619f5218ce12dfd8c03563e5203993673154a369ddb92e41065: Status 404 returned error can't find the container with id 35c682a483c35619f5218ce12dfd8c03563e5203993673154a369ddb92e41065 Jan 21 11:32:51 crc kubenswrapper[4925]: I0121 11:32:51.464224 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c57t" event={"ID":"87aa630b-73cc-4364-99bd-f6cd204056ef","Type":"ContainerStarted","Data":"2f825ef69ee5cec37bc8aec710a50b3b218cb289d19490c3223306513dd3362d"} Jan 21 11:32:51 crc kubenswrapper[4925]: I0121 11:32:51.473348 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/cinder-db-create-4fv8w" event={"ID":"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c","Type":"ContainerStarted","Data":"fea557108ee2880b6213b3ae9c90d1619bfae757a0ab40c09b3b4dade764862a"} Jan 21 11:32:51 crc kubenswrapper[4925]: I0121 11:32:51.473428 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-create-4fv8w" event={"ID":"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c","Type":"ContainerStarted","Data":"d1576fd36be5d60177398d6bb33d18e7b66e4c0d68195ee06050c777a528234d"} Jan 21 11:32:51 crc kubenswrapper[4925]: I0121 11:32:51.482788 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" event={"ID":"b2164931-e4db-447d-9821-118982670bdc","Type":"ContainerStarted","Data":"35c682a483c35619f5218ce12dfd8c03563e5203993673154a369ddb92e41065"} Jan 21 11:32:51 crc kubenswrapper[4925]: I0121 11:32:51.551606 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-db-create-4fv8w" podStartSLOduration=1.551560541 podStartE2EDuration="1.551560541s" podCreationTimestamp="2026-01-21 11:32:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:32:51.52422812 +0000 UTC m=+2263.128120064" watchObservedRunningTime="2026-01-21 11:32:51.551560541 +0000 UTC m=+2263.155452475" Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.163489 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.493485 4925 generic.go:334] "Generic (PLEG): container finished" podID="b2164931-e4db-447d-9821-118982670bdc" containerID="b3f2c97406c139215c34b870a0b526a4e41dc1fda993e0ab1b4230292a5f5dce" exitCode=0 Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.494422 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" event={"ID":"b2164931-e4db-447d-9821-118982670bdc","Type":"ContainerDied","Data":"b3f2c97406c139215c34b870a0b526a4e41dc1fda993e0ab1b4230292a5f5dce"} Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.503795 4925 generic.go:334] "Generic (PLEG): container finished" podID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerID="2f825ef69ee5cec37bc8aec710a50b3b218cb289d19490c3223306513dd3362d" exitCode=0 Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.503908 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c57t" event={"ID":"87aa630b-73cc-4364-99bd-f6cd204056ef","Type":"ContainerDied","Data":"2f825ef69ee5cec37bc8aec710a50b3b218cb289d19490c3223306513dd3362d"} Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.506546 4925 generic.go:334] "Generic (PLEG): container finished" podID="4c98b6c2-3cd8-4c48-8142-de9d6e11be9c" containerID="fea557108ee2880b6213b3ae9c90d1619bfae757a0ab40c09b3b4dade764862a" exitCode=0 Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.506609 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-create-4fv8w" event={"ID":"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c","Type":"ContainerDied","Data":"fea557108ee2880b6213b3ae9c90d1619bfae757a0ab40c09b3b4dade764862a"} Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.953779 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.958777 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="ceilometer-central-agent" containerID="cri-o://60e05be369a70403b1d12d1d0a3a825a6c31d501fde6f57728c19646e856e254" gracePeriod=30 Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.958894 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="proxy-httpd" containerID="cri-o://8936c9a16c3fb1847956140594253afafdb741f4644a4c4a45ddb1737a966c69" gracePeriod=30 Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.958903 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="ceilometer-notification-agent" containerID="cri-o://a1b44f02fed53e80bc0bf82fd6439fbd7535569ff5d8e46d3e83e189fe585009" gracePeriod=30 Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.958898 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="sg-core" containerID="cri-o://30cabfecac6697e249e5976f11acaf774fbe53c2c1bb8872fe3b983bbca1c3cf" gracePeriod=30 Jan 21 11:32:52 crc kubenswrapper[4925]: I0121 11:32:52.974841 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.181:3000/\": EOF" Jan 21 11:32:53 crc kubenswrapper[4925]: I0121 11:32:53.417384 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:53 crc kubenswrapper[4925]: I0121 11:32:53.518737 4925 generic.go:334] "Generic (PLEG): container finished" podID="c5109392-5870-4911-b674-2f78cf27c0ca" containerID="8936c9a16c3fb1847956140594253afafdb741f4644a4c4a45ddb1737a966c69" exitCode=0 Jan 21 11:32:53 crc kubenswrapper[4925]: I0121 11:32:53.518773 4925 generic.go:334] "Generic (PLEG): container finished" podID="c5109392-5870-4911-b674-2f78cf27c0ca" containerID="30cabfecac6697e249e5976f11acaf774fbe53c2c1bb8872fe3b983bbca1c3cf" exitCode=2 Jan 21 11:32:53 crc kubenswrapper[4925]: I0121 11:32:53.518846 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerDied","Data":"8936c9a16c3fb1847956140594253afafdb741f4644a4c4a45ddb1737a966c69"} Jan 21 11:32:53 crc kubenswrapper[4925]: I0121 11:32:53.518906 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerDied","Data":"30cabfecac6697e249e5976f11acaf774fbe53c2c1bb8872fe3b983bbca1c3cf"} Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.212288 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.223878 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.308155 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-operator-scripts\") pod \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\" (UID: \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\") " Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.308327 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2164931-e4db-447d-9821-118982670bdc-operator-scripts\") pod \"b2164931-e4db-447d-9821-118982670bdc\" (UID: \"b2164931-e4db-447d-9821-118982670bdc\") " Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.308510 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h85zn\" (UniqueName: \"kubernetes.io/projected/b2164931-e4db-447d-9821-118982670bdc-kube-api-access-h85zn\") pod \"b2164931-e4db-447d-9821-118982670bdc\" (UID: \"b2164931-e4db-447d-9821-118982670bdc\") " Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.308559 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqr72\" (UniqueName: \"kubernetes.io/projected/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-kube-api-access-dqr72\") pod \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\" (UID: \"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c\") " Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.310371 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2164931-e4db-447d-9821-118982670bdc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b2164931-e4db-447d-9821-118982670bdc" (UID: "b2164931-e4db-447d-9821-118982670bdc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.311466 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4c98b6c2-3cd8-4c48-8142-de9d6e11be9c" (UID: "4c98b6c2-3cd8-4c48-8142-de9d6e11be9c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.316733 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-kube-api-access-dqr72" (OuterVolumeSpecName: "kube-api-access-dqr72") pod "4c98b6c2-3cd8-4c48-8142-de9d6e11be9c" (UID: "4c98b6c2-3cd8-4c48-8142-de9d6e11be9c"). InnerVolumeSpecName "kube-api-access-dqr72". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.316995 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2164931-e4db-447d-9821-118982670bdc-kube-api-access-h85zn" (OuterVolumeSpecName: "kube-api-access-h85zn") pod "b2164931-e4db-447d-9821-118982670bdc" (UID: "b2164931-e4db-447d-9821-118982670bdc"). InnerVolumeSpecName "kube-api-access-h85zn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.412202 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h85zn\" (UniqueName: \"kubernetes.io/projected/b2164931-e4db-447d-9821-118982670bdc-kube-api-access-h85zn\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.412255 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqr72\" (UniqueName: \"kubernetes.io/projected/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-kube-api-access-dqr72\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.412271 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.412284 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2164931-e4db-447d-9821-118982670bdc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.553882 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-create-4fv8w" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.553953 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-create-4fv8w" event={"ID":"4c98b6c2-3cd8-4c48-8142-de9d6e11be9c","Type":"ContainerDied","Data":"d1576fd36be5d60177398d6bb33d18e7b66e4c0d68195ee06050c777a528234d"} Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.554026 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d1576fd36be5d60177398d6bb33d18e7b66e4c0d68195ee06050c777a528234d" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.563642 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.563739 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-f1be-account-create-update-hffks" event={"ID":"b2164931-e4db-447d-9821-118982670bdc","Type":"ContainerDied","Data":"35c682a483c35619f5218ce12dfd8c03563e5203993673154a369ddb92e41065"} Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.563849 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35c682a483c35619f5218ce12dfd8c03563e5203993673154a369ddb92e41065" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.569591 4925 generic.go:334] "Generic (PLEG): container finished" podID="c5109392-5870-4911-b674-2f78cf27c0ca" containerID="a1b44f02fed53e80bc0bf82fd6439fbd7535569ff5d8e46d3e83e189fe585009" exitCode=0 Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.569632 4925 generic.go:334] "Generic (PLEG): container finished" podID="c5109392-5870-4911-b674-2f78cf27c0ca" containerID="60e05be369a70403b1d12d1d0a3a825a6c31d501fde6f57728c19646e856e254" exitCode=0 Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.569666 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerDied","Data":"a1b44f02fed53e80bc0bf82fd6439fbd7535569ff5d8e46d3e83e189fe585009"} Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.569730 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerDied","Data":"60e05be369a70403b1d12d1d0a3a825a6c31d501fde6f57728c19646e856e254"} Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.572898 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c57t" event={"ID":"87aa630b-73cc-4364-99bd-f6cd204056ef","Type":"ContainerStarted","Data":"e87e8a2e2e6a8743220477fbbc482db9094259cfa8cbb5615cbb4be771f009d5"} Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.732908 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.950387 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:54 crc kubenswrapper[4925]: I0121 11:32:54.977666 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6c57t" podStartSLOduration=3.193502699 podStartE2EDuration="6.977636124s" podCreationTimestamp="2026-01-21 11:32:48 +0000 UTC" firstStartedPulling="2026-01-21 11:32:50.438334383 +0000 UTC m=+2262.042226317" lastFinishedPulling="2026-01-21 11:32:54.222467798 +0000 UTC m=+2265.826359742" observedRunningTime="2026-01-21 11:32:54.611609835 +0000 UTC m=+2266.215501769" watchObservedRunningTime="2026-01-21 11:32:54.977636124 +0000 UTC m=+2266.581528058" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.023345 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kshmh\" (UniqueName: \"kubernetes.io/projected/c5109392-5870-4911-b674-2f78cf27c0ca-kube-api-access-kshmh\") pod \"c5109392-5870-4911-b674-2f78cf27c0ca\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.023411 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-combined-ca-bundle\") pod \"c5109392-5870-4911-b674-2f78cf27c0ca\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.023443 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-config-data\") pod \"c5109392-5870-4911-b674-2f78cf27c0ca\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.023470 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-log-httpd\") pod \"c5109392-5870-4911-b674-2f78cf27c0ca\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.023520 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-ceilometer-tls-certs\") pod \"c5109392-5870-4911-b674-2f78cf27c0ca\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.023567 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-run-httpd\") pod \"c5109392-5870-4911-b674-2f78cf27c0ca\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.023609 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-scripts\") pod \"c5109392-5870-4911-b674-2f78cf27c0ca\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.023635 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-sg-core-conf-yaml\") pod \"c5109392-5870-4911-b674-2f78cf27c0ca\" (UID: \"c5109392-5870-4911-b674-2f78cf27c0ca\") " Jan 21 11:32:55 crc 
kubenswrapper[4925]: I0121 11:32:55.025024 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c5109392-5870-4911-b674-2f78cf27c0ca" (UID: "c5109392-5870-4911-b674-2f78cf27c0ca"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.025212 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c5109392-5870-4911-b674-2f78cf27c0ca" (UID: "c5109392-5870-4911-b674-2f78cf27c0ca"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.035686 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-scripts" (OuterVolumeSpecName: "scripts") pod "c5109392-5870-4911-b674-2f78cf27c0ca" (UID: "c5109392-5870-4911-b674-2f78cf27c0ca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.036175 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5109392-5870-4911-b674-2f78cf27c0ca-kube-api-access-kshmh" (OuterVolumeSpecName: "kube-api-access-kshmh") pod "c5109392-5870-4911-b674-2f78cf27c0ca" (UID: "c5109392-5870-4911-b674-2f78cf27c0ca"). InnerVolumeSpecName "kube-api-access-kshmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.051071 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c5109392-5870-4911-b674-2f78cf27c0ca" (UID: "c5109392-5870-4911-b674-2f78cf27c0ca"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.097409 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c5109392-5870-4911-b674-2f78cf27c0ca" (UID: "c5109392-5870-4911-b674-2f78cf27c0ca"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.109790 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5109392-5870-4911-b674-2f78cf27c0ca" (UID: "c5109392-5870-4911-b674-2f78cf27c0ca"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.126683 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.126732 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.126747 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5109392-5870-4911-b674-2f78cf27c0ca-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.126759 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.126774 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.126785 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kshmh\" (UniqueName: \"kubernetes.io/projected/c5109392-5870-4911-b674-2f78cf27c0ca-kube-api-access-kshmh\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.126795 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.135867 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-config-data" (OuterVolumeSpecName: "config-data") pod "c5109392-5870-4911-b674-2f78cf27c0ca" (UID: "c5109392-5870-4911-b674-2f78cf27c0ca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.228361 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5109392-5870-4911-b674-2f78cf27c0ca-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.586893 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"c5109392-5870-4911-b674-2f78cf27c0ca","Type":"ContainerDied","Data":"896c47a69c7c8978b6540ee3e4fafb0e13cf0b01bbe0abe8971dcbdb81375464"} Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.586972 4925 scope.go:117] "RemoveContainer" containerID="8936c9a16c3fb1847956140594253afafdb741f4644a4c4a45ddb1737a966c69" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.587188 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.616181 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.637014 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.653455 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:55 crc kubenswrapper[4925]: E0121 11:32:55.653875 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="ceilometer-central-agent" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.653896 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="ceilometer-central-agent" Jan 21 11:32:55 crc kubenswrapper[4925]: E0121 11:32:55.653910 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="proxy-httpd" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.653917 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="proxy-httpd" Jan 21 11:32:55 crc kubenswrapper[4925]: E0121 11:32:55.653931 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2164931-e4db-447d-9821-118982670bdc" containerName="mariadb-account-create-update" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.653937 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2164931-e4db-447d-9821-118982670bdc" containerName="mariadb-account-create-update" Jan 21 11:32:55 crc kubenswrapper[4925]: E0121 11:32:55.653954 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="sg-core" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.653961 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="sg-core" Jan 21 11:32:55 crc kubenswrapper[4925]: E0121 11:32:55.653981 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="ceilometer-notification-agent" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.653988 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="ceilometer-notification-agent" Jan 21 11:32:55 crc kubenswrapper[4925]: E0121 11:32:55.654001 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c98b6c2-3cd8-4c48-8142-de9d6e11be9c" containerName="mariadb-database-create" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.654008 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c98b6c2-3cd8-4c48-8142-de9d6e11be9c" containerName="mariadb-database-create" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.654154 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="ceilometer-central-agent" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.654166 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="ceilometer-notification-agent" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.654181 4925 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="sg-core" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.654192 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" containerName="proxy-httpd" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.654201 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2164931-e4db-447d-9821-118982670bdc" containerName="mariadb-account-create-update" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.654215 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c98b6c2-3cd8-4c48-8142-de9d6e11be9c" containerName="mariadb-database-create" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.657180 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.657644 4925 scope.go:117] "RemoveContainer" containerID="30cabfecac6697e249e5976f11acaf774fbe53c2c1bb8872fe3b983bbca1c3cf" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.664000 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.664238 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.664452 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.680206 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.713148 4925 scope.go:117] "RemoveContainer" containerID="a1b44f02fed53e80bc0bf82fd6439fbd7535569ff5d8e46d3e83e189fe585009" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.751553 4925 scope.go:117] "RemoveContainer" containerID="60e05be369a70403b1d12d1d0a3a825a6c31d501fde6f57728c19646e856e254" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.877196 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-run-httpd\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.877258 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-scripts\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.877305 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-config-data\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.877438 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvlv9\" (UniqueName: \"kubernetes.io/projected/3ccb7b10-6925-40de-8856-172fc9ad9077-kube-api-access-tvlv9\") pod \"ceilometer-0\" 
(UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.877496 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.877660 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.877698 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-log-httpd\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.877737 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.979012 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.979517 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-log-httpd\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.979574 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.979616 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-run-httpd\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.979642 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-scripts\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.979679 4925 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-config-data\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.979765 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvlv9\" (UniqueName: \"kubernetes.io/projected/3ccb7b10-6925-40de-8856-172fc9ad9077-kube-api-access-tvlv9\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.979823 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.980346 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-log-httpd\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.980376 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-run-httpd\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.983108 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.986859 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.987160 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-config-data\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.987302 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-scripts\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.993780 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:55 crc kubenswrapper[4925]: I0121 11:32:55.994029 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:56 crc kubenswrapper[4925]: I0121 11:32:56.008549 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvlv9\" (UniqueName: \"kubernetes.io/projected/3ccb7b10-6925-40de-8856-172fc9ad9077-kube-api-access-tvlv9\") pod \"ceilometer-0\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:56 crc kubenswrapper[4925]: I0121 11:32:56.281666 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:32:57 crc kubenswrapper[4925]: I0121 11:32:57.088165 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:32:57 crc kubenswrapper[4925]: W0121 11:32:57.106138 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ccb7b10_6925_40de_8856_172fc9ad9077.slice/crio-1b3e331f3a3b8a4b1dea3a4dd9ba5a1ebc171deb16e7df0c62db1161544ca0a3 WatchSource:0}: Error finding container 1b3e331f3a3b8a4b1dea3a4dd9ba5a1ebc171deb16e7df0c62db1161544ca0a3: Status 404 returned error can't find the container with id 1b3e331f3a3b8a4b1dea3a4dd9ba5a1ebc171deb16e7df0c62db1161544ca0a3 Jan 21 11:32:57 crc kubenswrapper[4925]: I0121 11:32:57.228551 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:57 crc kubenswrapper[4925]: I0121 11:32:57.524794 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5109392-5870-4911-b674-2f78cf27c0ca" path="/var/lib/kubelet/pods/c5109392-5870-4911-b674-2f78cf27c0ca/volumes" Jan 21 11:32:57 crc kubenswrapper[4925]: I0121 11:32:57.608096 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerStarted","Data":"1b3e331f3a3b8a4b1dea3a4dd9ba5a1ebc171deb16e7df0c62db1161544ca0a3"} Jan 21 11:32:58 crc kubenswrapper[4925]: I0121 11:32:58.522049 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:32:58 crc kubenswrapper[4925]: I0121 11:32:58.630092 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerStarted","Data":"5dd9edb1ebc8474107f1a3ef3fa74372643f26baeffc5135e86c9aa096873962"} Jan 21 11:32:59 crc kubenswrapper[4925]: I0121 11:32:59.083171 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:59 crc kubenswrapper[4925]: I0121 11:32:59.083547 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:59 crc kubenswrapper[4925]: I0121 11:32:59.137931 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:59 crc kubenswrapper[4925]: I0121 11:32:59.647529 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerStarted","Data":"4c83e756039b53a4cfb1589d92b4edc6a5513b7d52cee54c440be76fa516a59c"} Jan 21 11:32:59 crc kubenswrapper[4925]: I0121 11:32:59.723221 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:32:59 crc kubenswrapper[4925]: I0121 11:32:59.806216 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.400031 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-db-sync-8mp9b"] Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.401780 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.405529 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-cinder-dockercfg-9cmn8" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.405616 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-scripts" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.405889 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-config-data" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.413154 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-db-sync-8mp9b"] Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.579302 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz8sn\" (UniqueName: \"kubernetes.io/projected/2e843ee1-28fe-459d-9dd7-4a8b41127812-kube-api-access-rz8sn\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.579375 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-combined-ca-bundle\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.579604 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-config-data\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.579774 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-scripts\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.580003 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/2e843ee1-28fe-459d-9dd7-4a8b41127812-etc-machine-id\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.580081 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-db-sync-config-data\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.660936 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerStarted","Data":"102fece7e50a90dd51662a11d44186fed5562a7b71ad6c10468170ce07d7709b"} Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.682658 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-scripts\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.682820 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e843ee1-28fe-459d-9dd7-4a8b41127812-etc-machine-id\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.682854 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-db-sync-config-data\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.682900 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz8sn\" (UniqueName: \"kubernetes.io/projected/2e843ee1-28fe-459d-9dd7-4a8b41127812-kube-api-access-rz8sn\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.682947 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-combined-ca-bundle\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.682997 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-config-data\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.682991 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e843ee1-28fe-459d-9dd7-4a8b41127812-etc-machine-id\") pod \"cinder-db-sync-8mp9b\" (UID: 
\"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.688977 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-db-sync-config-data\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.689574 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-config-data\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.690644 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-combined-ca-bundle\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.690890 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-scripts\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.702361 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz8sn\" (UniqueName: \"kubernetes.io/projected/2e843ee1-28fe-459d-9dd7-4a8b41127812-kube-api-access-rz8sn\") pod \"cinder-db-sync-8mp9b\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:00 crc kubenswrapper[4925]: I0121 11:33:00.720896 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:01 crc kubenswrapper[4925]: I0121 11:33:01.173761 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:01 crc kubenswrapper[4925]: I0121 11:33:01.586054 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-db-sync-8mp9b"] Jan 21 11:33:01 crc kubenswrapper[4925]: W0121 11:33:01.597502 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e843ee1_28fe_459d_9dd7_4a8b41127812.slice/crio-3822fe663f2cb11fbdc91056531fc72dbf2332eb72d0a820187633cce50ab252 WatchSource:0}: Error finding container 3822fe663f2cb11fbdc91056531fc72dbf2332eb72d0a820187633cce50ab252: Status 404 returned error can't find the container with id 3822fe663f2cb11fbdc91056531fc72dbf2332eb72d0a820187633cce50ab252 Jan 21 11:33:01 crc kubenswrapper[4925]: I0121 11:33:01.757218 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" event={"ID":"2e843ee1-28fe-459d-9dd7-4a8b41127812","Type":"ContainerStarted","Data":"3822fe663f2cb11fbdc91056531fc72dbf2332eb72d0a820187633cce50ab252"} Jan 21 11:33:02 crc kubenswrapper[4925]: I0121 11:33:02.399503 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:02 crc kubenswrapper[4925]: I0121 11:33:02.848451 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerStarted","Data":"7c92769a060e35c49f2a65bf4eb5e49852a9881d526d341ac6fad7909c08b41e"} Jan 21 11:33:02 crc kubenswrapper[4925]: I0121 11:33:02.848771 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:33:02 crc kubenswrapper[4925]: I0121 11:33:02.878317 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=3.7779666179999998 podStartE2EDuration="7.878281772s" podCreationTimestamp="2026-01-21 11:32:55 +0000 UTC" firstStartedPulling="2026-01-21 11:32:57.112385201 +0000 UTC m=+2268.716277135" lastFinishedPulling="2026-01-21 11:33:01.212700355 +0000 UTC m=+2272.816592289" observedRunningTime="2026-01-21 11:33:02.871491307 +0000 UTC m=+2274.475383241" watchObservedRunningTime="2026-01-21 11:33:02.878281772 +0000 UTC m=+2274.482173706" Jan 21 11:33:03 crc kubenswrapper[4925]: I0121 11:33:03.528700 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6c57t"] Jan 21 11:33:03 crc kubenswrapper[4925]: I0121 11:33:03.529286 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6c57t" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerName="registry-server" containerID="cri-o://e87e8a2e2e6a8743220477fbbc482db9094259cfa8cbb5615cbb4be771f009d5" gracePeriod=2 Jan 21 11:33:03 crc kubenswrapper[4925]: I0121 11:33:03.659154 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:03 crc kubenswrapper[4925]: I0121 
11:33:03.954217 4925 generic.go:334] "Generic (PLEG): container finished" podID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerID="e87e8a2e2e6a8743220477fbbc482db9094259cfa8cbb5615cbb4be771f009d5" exitCode=0 Jan 21 11:33:03 crc kubenswrapper[4925]: I0121 11:33:03.954371 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c57t" event={"ID":"87aa630b-73cc-4364-99bd-f6cd204056ef","Type":"ContainerDied","Data":"e87e8a2e2e6a8743220477fbbc482db9094259cfa8cbb5615cbb4be771f009d5"} Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.307911 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.533287 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-catalog-content\") pod \"87aa630b-73cc-4364-99bd-f6cd204056ef\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.533531 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-utilities\") pod \"87aa630b-73cc-4364-99bd-f6cd204056ef\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.533645 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sg6wg\" (UniqueName: \"kubernetes.io/projected/87aa630b-73cc-4364-99bd-f6cd204056ef-kube-api-access-sg6wg\") pod \"87aa630b-73cc-4364-99bd-f6cd204056ef\" (UID: \"87aa630b-73cc-4364-99bd-f6cd204056ef\") " Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.536929 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-utilities" (OuterVolumeSpecName: "utilities") pod "87aa630b-73cc-4364-99bd-f6cd204056ef" (UID: "87aa630b-73cc-4364-99bd-f6cd204056ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.541709 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87aa630b-73cc-4364-99bd-f6cd204056ef-kube-api-access-sg6wg" (OuterVolumeSpecName: "kube-api-access-sg6wg") pod "87aa630b-73cc-4364-99bd-f6cd204056ef" (UID: "87aa630b-73cc-4364-99bd-f6cd204056ef"). InnerVolumeSpecName "kube-api-access-sg6wg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.622040 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87aa630b-73cc-4364-99bd-f6cd204056ef" (UID: "87aa630b-73cc-4364-99bd-f6cd204056ef"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.635627 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.635666 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sg6wg\" (UniqueName: \"kubernetes.io/projected/87aa630b-73cc-4364-99bd-f6cd204056ef-kube-api-access-sg6wg\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.635680 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87aa630b-73cc-4364-99bd-f6cd204056ef-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.932999 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.975263 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6c57t" event={"ID":"87aa630b-73cc-4364-99bd-f6cd204056ef","Type":"ContainerDied","Data":"e3264e42f6915a0e71121f41ba4f7852d4c7c13cfdaba9410357ba1eeea192f7"} Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.975364 4925 scope.go:117] "RemoveContainer" containerID="e87e8a2e2e6a8743220477fbbc482db9094259cfa8cbb5615cbb4be771f009d5" Jan 21 11:33:04 crc kubenswrapper[4925]: I0121 11:33:04.975360 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6c57t" Jan 21 11:33:05 crc kubenswrapper[4925]: I0121 11:33:05.026995 4925 scope.go:117] "RemoveContainer" containerID="2f825ef69ee5cec37bc8aec710a50b3b218cb289d19490c3223306513dd3362d" Jan 21 11:33:05 crc kubenswrapper[4925]: I0121 11:33:05.128781 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6c57t"] Jan 21 11:33:05 crc kubenswrapper[4925]: I0121 11:33:05.163080 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6c57t"] Jan 21 11:33:05 crc kubenswrapper[4925]: I0121 11:33:05.166434 4925 scope.go:117] "RemoveContainer" containerID="299d857f7f79194b648b9d2f67fe41e5578d9d31ba699ce06cb0af5cddb9de0d" Jan 21 11:33:05 crc kubenswrapper[4925]: I0121 11:33:05.515811 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" path="/var/lib/kubelet/pods/87aa630b-73cc-4364-99bd-f6cd204056ef/volumes" Jan 21 11:33:06 crc kubenswrapper[4925]: I0121 11:33:06.213003 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:07 crc kubenswrapper[4925]: I0121 11:33:07.474613 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:08 crc kubenswrapper[4925]: I0121 11:33:08.718453 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:10 crc 
kubenswrapper[4925]: I0121 11:33:10.004810 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:11 crc kubenswrapper[4925]: I0121 11:33:11.372155 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:12 crc kubenswrapper[4925]: I0121 11:33:12.759187 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:13 crc kubenswrapper[4925]: I0121 11:33:13.496800 4925 scope.go:117] "RemoveContainer" containerID="389a7210a2a6ab5ba90a1a2c0e1a88de2a7ec3eca48bcaf1bd962604589e5bbf" Jan 21 11:33:13 crc kubenswrapper[4925]: I0121 11:33:13.993294 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:15 crc kubenswrapper[4925]: I0121 11:33:15.364892 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:16 crc kubenswrapper[4925]: I0121 11:33:16.605689 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:17 crc kubenswrapper[4925]: I0121 11:33:17.977298 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:19 crc kubenswrapper[4925]: I0121 11:33:19.337316 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:20 crc kubenswrapper[4925]: I0121 11:33:20.568103 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:21 crc kubenswrapper[4925]: I0121 11:33:21.854517 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:23 crc kubenswrapper[4925]: I0121 11:33:23.119258 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:24 crc kubenswrapper[4925]: I0121 11:33:24.360848 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:25 crc kubenswrapper[4925]: I0121 11:33:25.602290 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:26 crc kubenswrapper[4925]: I0121 11:33:26.293177 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:33:26 crc kubenswrapper[4925]: I0121 11:33:26.858168 4925 scope.go:117] "RemoveContainer" containerID="e060760729d1d90f3b8bfec85c18c8e9d92dfa37150de6540dd2bb87ca2cd627" Jan 21 11:33:26 crc kubenswrapper[4925]: I0121 11:33:26.991882 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:28 crc kubenswrapper[4925]: I0121 11:33:28.252893 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:29 crc kubenswrapper[4925]: I0121 11:33:29.780605 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:30 crc kubenswrapper[4925]: E0121 11:33:30.336189 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 21 11:33:30 crc kubenswrapper[4925]: E0121 11:33:30.336816 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rz8sn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOn
ce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-8mp9b_watcher-kuttl-default(2e843ee1-28fe-459d-9dd7-4a8b41127812): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 11:33:30 crc kubenswrapper[4925]: E0121 11:33:30.338054 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" podUID="2e843ee1-28fe-459d-9dd7-4a8b41127812" Jan 21 11:33:30 crc kubenswrapper[4925]: E0121 11:33:30.564854 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" podUID="2e843ee1-28fe-459d-9dd7-4a8b41127812" Jan 21 11:33:31 crc kubenswrapper[4925]: I0121 11:33:31.044444 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:32 crc kubenswrapper[4925]: I0121 11:33:32.425599 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:33 crc kubenswrapper[4925]: I0121 11:33:33.675206 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:34 crc kubenswrapper[4925]: I0121 11:33:34.952057 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:36 crc kubenswrapper[4925]: I0121 11:33:36.211936 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:37 crc kubenswrapper[4925]: I0121 11:33:37.464974 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:38 crc kubenswrapper[4925]: I0121 11:33:38.704719 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:39 crc kubenswrapper[4925]: I0121 11:33:39.981870 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:41 crc kubenswrapper[4925]: I0121 11:33:41.219559 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:42 crc kubenswrapper[4925]: I0121 11:33:42.489448 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:43 crc kubenswrapper[4925]: I0121 11:33:43.715377 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:44 crc kubenswrapper[4925]: I0121 11:33:44.953993 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:45 crc kubenswrapper[4925]: I0121 11:33:45.957429 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" event={"ID":"2e843ee1-28fe-459d-9dd7-4a8b41127812","Type":"ContainerStarted","Data":"a1a520a5aa970b1272b144e532e7c9ac0dd70608fa3d7a4bd5a4e76c4a0de0d7"} Jan 21 11:33:45 crc kubenswrapper[4925]: I0121 11:33:45.988958 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" podStartSLOduration=2.632193343 podStartE2EDuration="45.988928383s" podCreationTimestamp="2026-01-21 11:33:00 +0000 UTC" firstStartedPulling="2026-01-21 11:33:01.606535443 +0000 UTC m=+2273.210427377" lastFinishedPulling="2026-01-21 11:33:44.963270483 +0000 UTC m=+2316.567162417" observedRunningTime="2026-01-21 11:33:45.981078236 +0000 UTC m=+2317.584970170" watchObservedRunningTime="2026-01-21 11:33:45.988928383 +0000 UTC m=+2317.592820317" Jan 21 11:33:46 crc kubenswrapper[4925]: I0121 11:33:46.189131 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:47 crc kubenswrapper[4925]: I0121 11:33:47.459541 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:48 crc kubenswrapper[4925]: I0121 11:33:48.717804 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:49 crc kubenswrapper[4925]: I0121 11:33:49.965924 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:51 crc kubenswrapper[4925]: I0121 11:33:51.200298 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:52 crc kubenswrapper[4925]: I0121 11:33:52.438380 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:53 crc kubenswrapper[4925]: I0121 11:33:53.734704 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:54 crc kubenswrapper[4925]: I0121 11:33:54.045139 4925 generic.go:334] "Generic (PLEG): container finished" podID="2e843ee1-28fe-459d-9dd7-4a8b41127812" 
containerID="a1a520a5aa970b1272b144e532e7c9ac0dd70608fa3d7a4bd5a4e76c4a0de0d7" exitCode=0 Jan 21 11:33:54 crc kubenswrapper[4925]: I0121 11:33:54.045337 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" event={"ID":"2e843ee1-28fe-459d-9dd7-4a8b41127812","Type":"ContainerDied","Data":"a1a520a5aa970b1272b144e532e7c9ac0dd70608fa3d7a4bd5a4e76c4a0de0d7"} Jan 21 11:33:54 crc kubenswrapper[4925]: I0121 11:33:54.996357 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.459005 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.655580 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-combined-ca-bundle\") pod \"2e843ee1-28fe-459d-9dd7-4a8b41127812\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.655718 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-scripts\") pod \"2e843ee1-28fe-459d-9dd7-4a8b41127812\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.655749 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-config-data\") pod \"2e843ee1-28fe-459d-9dd7-4a8b41127812\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.656008 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz8sn\" (UniqueName: \"kubernetes.io/projected/2e843ee1-28fe-459d-9dd7-4a8b41127812-kube-api-access-rz8sn\") pod \"2e843ee1-28fe-459d-9dd7-4a8b41127812\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.656089 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e843ee1-28fe-459d-9dd7-4a8b41127812-etc-machine-id\") pod \"2e843ee1-28fe-459d-9dd7-4a8b41127812\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.656127 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-db-sync-config-data\") pod \"2e843ee1-28fe-459d-9dd7-4a8b41127812\" (UID: \"2e843ee1-28fe-459d-9dd7-4a8b41127812\") " Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.656361 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e843ee1-28fe-459d-9dd7-4a8b41127812-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2e843ee1-28fe-459d-9dd7-4a8b41127812" (UID: "2e843ee1-28fe-459d-9dd7-4a8b41127812"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.659254 4925 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e843ee1-28fe-459d-9dd7-4a8b41127812-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.662716 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2e843ee1-28fe-459d-9dd7-4a8b41127812" (UID: "2e843ee1-28fe-459d-9dd7-4a8b41127812"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.662841 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-scripts" (OuterVolumeSpecName: "scripts") pod "2e843ee1-28fe-459d-9dd7-4a8b41127812" (UID: "2e843ee1-28fe-459d-9dd7-4a8b41127812"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.681703 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e843ee1-28fe-459d-9dd7-4a8b41127812-kube-api-access-rz8sn" (OuterVolumeSpecName: "kube-api-access-rz8sn") pod "2e843ee1-28fe-459d-9dd7-4a8b41127812" (UID: "2e843ee1-28fe-459d-9dd7-4a8b41127812"). InnerVolumeSpecName "kube-api-access-rz8sn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.687181 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2e843ee1-28fe-459d-9dd7-4a8b41127812" (UID: "2e843ee1-28fe-459d-9dd7-4a8b41127812"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.713185 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-config-data" (OuterVolumeSpecName: "config-data") pod "2e843ee1-28fe-459d-9dd7-4a8b41127812" (UID: "2e843ee1-28fe-459d-9dd7-4a8b41127812"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.761553 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.761609 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.761625 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.761638 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rz8sn\" (UniqueName: \"kubernetes.io/projected/2e843ee1-28fe-459d-9dd7-4a8b41127812-kube-api-access-rz8sn\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:55 crc kubenswrapper[4925]: I0121 11:33:55.761653 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e843ee1-28fe-459d-9dd7-4a8b41127812-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.067166 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" event={"ID":"2e843ee1-28fe-459d-9dd7-4a8b41127812","Type":"ContainerDied","Data":"3822fe663f2cb11fbdc91056531fc72dbf2332eb72d0a820187633cce50ab252"} Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.067220 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3822fe663f2cb11fbdc91056531fc72dbf2332eb72d0a820187633cce50ab252" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.067330 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-db-sync-8mp9b" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.278365 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.418703 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:33:56 crc kubenswrapper[4925]: E0121 11:33:56.419540 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerName="extract-utilities" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.421083 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerName="extract-utilities" Jan 21 11:33:56 crc kubenswrapper[4925]: E0121 11:33:56.421195 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerName="extract-content" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.421288 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerName="extract-content" Jan 21 11:33:56 crc kubenswrapper[4925]: E0121 11:33:56.421384 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerName="registry-server" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.421477 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerName="registry-server" Jan 21 11:33:56 crc kubenswrapper[4925]: E0121 11:33:56.421569 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e843ee1-28fe-459d-9dd7-4a8b41127812" containerName="cinder-db-sync" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.421638 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e843ee1-28fe-459d-9dd7-4a8b41127812" containerName="cinder-db-sync" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.421986 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e843ee1-28fe-459d-9dd7-4a8b41127812" containerName="cinder-db-sync" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.422123 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="87aa630b-73cc-4364-99bd-f6cd204056ef" containerName="registry-server" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.423436 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.426624 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-cinder-dockercfg-9cmn8" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.426919 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-scheduler-config-data" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.427103 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-config-data" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.427261 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-scripts" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.449074 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.484156 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.486420 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.500756 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-backup-config-data" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.511183 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.578908 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.579271 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.579520 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.579660 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.579806 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb84j\" (UniqueName: \"kubernetes.io/projected/5b146d44-ea1d-4c52-99e3-c9a16124fd89-kube-api-access-qb84j\") pod 
\"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.579970 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-scripts\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.580047 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5b146d44-ea1d-4c52-99e3-c9a16124fd89-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682139 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb8sm\" (UniqueName: \"kubernetes.io/projected/80dfa626-dc13-44ac-ba64-0a70da810969-kube-api-access-lb8sm\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682213 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682240 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682262 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data-custom\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682305 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682345 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb84j\" (UniqueName: \"kubernetes.io/projected/5b146d44-ea1d-4c52-99e3-c9a16124fd89-kube-api-access-qb84j\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682390 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-scripts\") pod \"cinder-scheduler-0\" (UID: 
\"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682430 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5b146d44-ea1d-4c52-99e3-c9a16124fd89-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682454 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682479 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682498 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-nvme\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682518 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682550 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682577 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682611 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-run\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682638 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-dev\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " 
pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682655 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-scripts\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682673 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682703 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682729 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682798 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-sys\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682869 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-lib-modules\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.682939 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.683834 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5b146d44-ea1d-4c52-99e3-c9a16124fd89-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.690971 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.693136 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.693429 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.694087 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-scripts\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.704313 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.710527 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb84j\" (UniqueName: \"kubernetes.io/projected/5b146d44-ea1d-4c52-99e3-c9a16124fd89-kube-api-access-qb84j\") pod \"cinder-scheduler-0\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.713161 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.715352 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.721234 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-api-config-data" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.748079 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.749241 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793025 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793095 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793121 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-nvme\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793152 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793172 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793207 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-run\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793249 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-dev\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793279 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-scripts\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793297 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 
crc kubenswrapper[4925]: I0121 11:33:56.793327 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793368 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-sys\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793411 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-lib-modules\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793446 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793472 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb8sm\" (UniqueName: \"kubernetes.io/projected/80dfa626-dc13-44ac-ba64-0a70da810969-kube-api-access-lb8sm\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793500 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data-custom\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.793528 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.794117 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.797172 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-dev\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.797388 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.798990 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-nvme\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.799544 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.799628 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-run\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.799654 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-sys\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.799689 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-lib-modules\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.799710 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.801870 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.810121 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-scripts\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.812930 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.829492 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.830505 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.830831 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data-custom\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.855239 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb8sm\" (UniqueName: \"kubernetes.io/projected/80dfa626-dc13-44ac-ba64-0a70da810969-kube-api-access-lb8sm\") pod \"cinder-backup-0\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.898652 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.898987 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4bccc878-cbb0-4406-8bea-f1154caf451f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.899121 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bccc878-cbb0-4406-8bea-f1154caf451f-logs\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.899261 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data-custom\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.899445 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.899604 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rp64\" (UniqueName: 
\"kubernetes.io/projected/4bccc878-cbb0-4406-8bea-f1154caf451f-kube-api-access-4rp64\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.899714 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-scripts\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:56 crc kubenswrapper[4925]: I0121 11:33:56.899804 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.003384 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4bccc878-cbb0-4406-8bea-f1154caf451f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.003466 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bccc878-cbb0-4406-8bea-f1154caf451f-logs\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.003523 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data-custom\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.003556 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.003615 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rp64\" (UniqueName: \"kubernetes.io/projected/4bccc878-cbb0-4406-8bea-f1154caf451f-kube-api-access-4rp64\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.003644 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-scripts\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.003663 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: 
I0121 11:33:57.003703 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.007557 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4bccc878-cbb0-4406-8bea-f1154caf451f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.009075 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bccc878-cbb0-4406-8bea-f1154caf451f-logs\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.009092 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.014305 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.020332 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.033121 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-scripts\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.033816 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data-custom\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.037765 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rp64\" (UniqueName: \"kubernetes.io/projected/4bccc878-cbb0-4406-8bea-f1154caf451f-kube-api-access-4rp64\") pod \"cinder-api-0\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.118916 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.228074 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.750531 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:33:57 crc kubenswrapper[4925]: I0121 11:33:57.762735 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:58 crc kubenswrapper[4925]: I0121 11:33:58.098267 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:33:58 crc kubenswrapper[4925]: W0121 11:33:58.123735 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80dfa626_dc13_44ac_ba64_0a70da810969.slice/crio-a6eb5c414bb0377208395f87a93d638e313721fcad04108ef0a9705b58d64013 WatchSource:0}: Error finding container a6eb5c414bb0377208395f87a93d638e313721fcad04108ef0a9705b58d64013: Status 404 returned error can't find the container with id a6eb5c414bb0377208395f87a93d638e313721fcad04108ef0a9705b58d64013 Jan 21 11:33:58 crc kubenswrapper[4925]: I0121 11:33:58.174968 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"5b146d44-ea1d-4c52-99e3-c9a16124fd89","Type":"ContainerStarted","Data":"bbd9cb292cf82b987cf43b84ca66a5813a4fa3f1ce8c85d86fa90e482dd22bd5"} Jan 21 11:33:58 crc kubenswrapper[4925]: I0121 11:33:58.257894 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:33:59 crc kubenswrapper[4925]: I0121 11:33:59.080638 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:33:59 crc kubenswrapper[4925]: I0121 11:33:59.199667 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"80dfa626-dc13-44ac-ba64-0a70da810969","Type":"ContainerStarted","Data":"a6eb5c414bb0377208395f87a93d638e313721fcad04108ef0a9705b58d64013"} Jan 21 11:33:59 crc kubenswrapper[4925]: I0121 11:33:59.204763 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"4bccc878-cbb0-4406-8bea-f1154caf451f","Type":"ContainerStarted","Data":"a376c8bebd3f00594a7652259575f145e95f1504b6cb863cf1b700b183fe1942"} Jan 21 11:34:00 crc kubenswrapper[4925]: I0121 11:34:00.439759 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:01 crc kubenswrapper[4925]: I0121 11:34:01.781841 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"80dfa626-dc13-44ac-ba64-0a70da810969","Type":"ContainerStarted","Data":"bc7ef51c508dd0acd8517fbbc23a971d8f08aa8be247a991d19d153b946719d1"} Jan 21 11:34:01 crc kubenswrapper[4925]: I0121 11:34:01.793946 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"4bccc878-cbb0-4406-8bea-f1154caf451f","Type":"ContainerStarted","Data":"f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308"} Jan 21 11:34:02 crc kubenswrapper[4925]: I0121 11:34:02.006166 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:02 crc kubenswrapper[4925]: I0121 11:34:02.389629 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:02 crc kubenswrapper[4925]: I0121 11:34:02.894027 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"5b146d44-ea1d-4c52-99e3-c9a16124fd89","Type":"ContainerStarted","Data":"199768c90bfcb14d2ca01b845675ec21543c3dc829be32a0b78c5c785b149792"} Jan 21 11:34:02 crc kubenswrapper[4925]: I0121 11:34:02.938553 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"80dfa626-dc13-44ac-ba64-0a70da810969","Type":"ContainerStarted","Data":"aa8fdf2e122406e76d71e30d7ec801c40f579239474c8a0ffd03ab4b266d8046"} Jan 21 11:34:02 crc kubenswrapper[4925]: I0121 11:34:02.985103 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-backup-0" podStartSLOduration=5.738856412 podStartE2EDuration="6.985077306s" podCreationTimestamp="2026-01-21 11:33:56 +0000 UTC" firstStartedPulling="2026-01-21 11:33:58.172616498 +0000 UTC m=+2329.776508432" lastFinishedPulling="2026-01-21 11:33:59.418837392 +0000 UTC m=+2331.022729326" observedRunningTime="2026-01-21 11:34:02.972294132 +0000 UTC m=+2334.576186066" watchObservedRunningTime="2026-01-21 11:34:02.985077306 +0000 UTC m=+2334.588969240" Jan 21 11:34:03 crc kubenswrapper[4925]: I0121 11:34:03.314697 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:03 crc kubenswrapper[4925]: I0121 11:34:03.949987 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"4bccc878-cbb0-4406-8bea-f1154caf451f","Type":"ContainerStarted","Data":"5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca"} Jan 21 11:34:03 crc kubenswrapper[4925]: I0121 11:34:03.950378 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-api-0" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerName="cinder-api-log" containerID="cri-o://f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308" gracePeriod=30 Jan 21 11:34:03 crc kubenswrapper[4925]: I0121 11:34:03.950579 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-api-0" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerName="cinder-api" containerID="cri-o://5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca" gracePeriod=30 Jan 21 11:34:03 crc kubenswrapper[4925]: I0121 11:34:03.950864 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:03 crc kubenswrapper[4925]: I0121 11:34:03.960730 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"5b146d44-ea1d-4c52-99e3-c9a16124fd89","Type":"ContainerStarted","Data":"287b2b2b7dc7095f10b3169f1616bf028f06ebc0c095128d453405d28fbee65d"} Jan 21 11:34:03 crc kubenswrapper[4925]: I0121 11:34:03.989785 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-api-0" podStartSLOduration=7.989752213 
podStartE2EDuration="7.989752213s" podCreationTimestamp="2026-01-21 11:33:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:34:03.977835397 +0000 UTC m=+2335.581727341" watchObservedRunningTime="2026-01-21 11:34:03.989752213 +0000 UTC m=+2335.593644147" Jan 21 11:34:04 crc kubenswrapper[4925]: I0121 11:34:04.032028 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-scheduler-0" podStartSLOduration=6.927739938 podStartE2EDuration="8.032008136s" podCreationTimestamp="2026-01-21 11:33:56 +0000 UTC" firstStartedPulling="2026-01-21 11:33:57.652579161 +0000 UTC m=+2329.256471095" lastFinishedPulling="2026-01-21 11:33:58.756847359 +0000 UTC m=+2330.360739293" observedRunningTime="2026-01-21 11:34:04.02803583 +0000 UTC m=+2335.631927764" watchObservedRunningTime="2026-01-21 11:34:04.032008136 +0000 UTC m=+2335.635900070" Jan 21 11:34:04 crc kubenswrapper[4925]: I0121 11:34:04.833463 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:05 crc kubenswrapper[4925]: I0121 11:34:05.097879 4925 generic.go:334] "Generic (PLEG): container finished" podID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerID="f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308" exitCode=143 Jan 21 11:34:05 crc kubenswrapper[4925]: I0121 11:34:05.098901 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"4bccc878-cbb0-4406-8bea-f1154caf451f","Type":"ContainerDied","Data":"f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308"} Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.046524 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.087570 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.103021 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data\") pod \"4bccc878-cbb0-4406-8bea-f1154caf451f\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.103185 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-scripts\") pod \"4bccc878-cbb0-4406-8bea-f1154caf451f\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.103219 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data-custom\") pod \"4bccc878-cbb0-4406-8bea-f1154caf451f\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.103344 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-combined-ca-bundle\") pod \"4bccc878-cbb0-4406-8bea-f1154caf451f\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.108654 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rp64\" (UniqueName: \"kubernetes.io/projected/4bccc878-cbb0-4406-8bea-f1154caf451f-kube-api-access-4rp64\") pod \"4bccc878-cbb0-4406-8bea-f1154caf451f\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.108763 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4bccc878-cbb0-4406-8bea-f1154caf451f-etc-machine-id\") pod \"4bccc878-cbb0-4406-8bea-f1154caf451f\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.108844 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-cert-memcached-mtls\") pod \"4bccc878-cbb0-4406-8bea-f1154caf451f\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.108974 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bccc878-cbb0-4406-8bea-f1154caf451f-logs\") pod \"4bccc878-cbb0-4406-8bea-f1154caf451f\" (UID: \"4bccc878-cbb0-4406-8bea-f1154caf451f\") " Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.112727 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bccc878-cbb0-4406-8bea-f1154caf451f-logs" (OuterVolumeSpecName: "logs") pod "4bccc878-cbb0-4406-8bea-f1154caf451f" (UID: "4bccc878-cbb0-4406-8bea-f1154caf451f"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.112878 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4bccc878-cbb0-4406-8bea-f1154caf451f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4bccc878-cbb0-4406-8bea-f1154caf451f" (UID: "4bccc878-cbb0-4406-8bea-f1154caf451f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.120022 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bccc878-cbb0-4406-8bea-f1154caf451f-kube-api-access-4rp64" (OuterVolumeSpecName: "kube-api-access-4rp64") pod "4bccc878-cbb0-4406-8bea-f1154caf451f" (UID: "4bccc878-cbb0-4406-8bea-f1154caf451f"). InnerVolumeSpecName "kube-api-access-4rp64". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.131466 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4bccc878-cbb0-4406-8bea-f1154caf451f" (UID: "4bccc878-cbb0-4406-8bea-f1154caf451f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.136617 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-scripts" (OuterVolumeSpecName: "scripts") pod "4bccc878-cbb0-4406-8bea-f1154caf451f" (UID: "4bccc878-cbb0-4406-8bea-f1154caf451f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.204734 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4bccc878-cbb0-4406-8bea-f1154caf451f" (UID: "4bccc878-cbb0-4406-8bea-f1154caf451f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.267836 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bccc878-cbb0-4406-8bea-f1154caf451f-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.267936 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.268178 4925 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.268225 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.268265 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rp64\" (UniqueName: \"kubernetes.io/projected/4bccc878-cbb0-4406-8bea-f1154caf451f-kube-api-access-4rp64\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.268300 4925 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4bccc878-cbb0-4406-8bea-f1154caf451f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.304852 4925 generic.go:334] "Generic (PLEG): container finished" podID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerID="5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca" exitCode=0 Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.305272 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"4bccc878-cbb0-4406-8bea-f1154caf451f","Type":"ContainerDied","Data":"5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca"} Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.305308 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"4bccc878-cbb0-4406-8bea-f1154caf451f","Type":"ContainerDied","Data":"a376c8bebd3f00594a7652259575f145e95f1504b6cb863cf1b700b183fe1942"} Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.305348 4925 scope.go:117] "RemoveContainer" containerID="5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.305590 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.368539 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data" (OuterVolumeSpecName: "config-data") pod "4bccc878-cbb0-4406-8bea-f1154caf451f" (UID: "4bccc878-cbb0-4406-8bea-f1154caf451f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.368775 4925 scope.go:117] "RemoveContainer" containerID="f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.381809 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.412148 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "4bccc878-cbb0-4406-8bea-f1154caf451f" (UID: "4bccc878-cbb0-4406-8bea-f1154caf451f"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.703092 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/4bccc878-cbb0-4406-8bea-f1154caf451f-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.725612 4925 scope.go:117] "RemoveContainer" containerID="5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca" Jan 21 11:34:06 crc kubenswrapper[4925]: E0121 11:34:06.755645 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca\": container with ID starting with 5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca not found: ID does not exist" containerID="5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.755703 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca"} err="failed to get container status \"5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca\": rpc error: code = NotFound desc = could not find container \"5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca\": container with ID starting with 5d7dac838933f895f6eac19f44654e83a1203e7e696a975a6ad705f1fe9746ca not found: ID does not exist" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.755736 4925 scope.go:117] "RemoveContainer" containerID="f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.755907 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:06 crc kubenswrapper[4925]: E0121 11:34:06.756974 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308\": container with ID starting with f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308 not found: ID does not exist" containerID="f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.757004 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308"} err="failed to get container status 
\"f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308\": rpc error: code = NotFound desc = could not find container \"f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308\": container with ID starting with f3b013eb6737441d578b3907666fc8a6178819eebf5bb8f4e48d6b026068f308 not found: ID does not exist" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.770641 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.783848 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.831422 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:06 crc kubenswrapper[4925]: E0121 11:34:06.831935 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerName="cinder-api" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.831956 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerName="cinder-api" Jan 21 11:34:06 crc kubenswrapper[4925]: E0121 11:34:06.831976 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerName="cinder-api-log" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.831982 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerName="cinder-api-log" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.832150 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerName="cinder-api-log" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.832173 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" containerName="cinder-api" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.833458 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.841037 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-cinder-public-svc" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.841335 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-cinder-internal-svc" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.853918 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-api-config-data" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.864743 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.905607 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.905732 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.905764 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-scripts\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.905835 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/57345c8f-262f-4a3d-812d-3e8c465a8216-etc-machine-id\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.905871 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.905914 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hm2x\" (UniqueName: \"kubernetes.io/projected/57345c8f-262f-4a3d-812d-3e8c465a8216-kube-api-access-9hm2x\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.905959 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data-custom\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.906018 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57345c8f-262f-4a3d-812d-3e8c465a8216-logs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.906050 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-public-tls-certs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:06 crc kubenswrapper[4925]: I0121 11:34:06.906079 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008015 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57345c8f-262f-4a3d-812d-3e8c465a8216-logs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008088 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-public-tls-certs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008149 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008195 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008282 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008318 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-scripts\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008355 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/57345c8f-262f-4a3d-812d-3e8c465a8216-etc-machine-id\") pod \"cinder-api-0\" (UID: 
\"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008412 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008447 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hm2x\" (UniqueName: \"kubernetes.io/projected/57345c8f-262f-4a3d-812d-3e8c465a8216-kube-api-access-9hm2x\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008522 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data-custom\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.008608 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/57345c8f-262f-4a3d-812d-3e8c465a8216-etc-machine-id\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.009131 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57345c8f-262f-4a3d-812d-3e8c465a8216-logs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.012593 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.013260 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data-custom\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.015179 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-public-tls-certs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.015812 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.017667 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.033508 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-scripts\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.045171 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-cert-memcached-mtls\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.059264 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hm2x\" (UniqueName: \"kubernetes.io/projected/57345c8f-262f-4a3d-812d-3e8c465a8216-kube-api-access-9hm2x\") pod \"cinder-api-0\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.259380 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.259492 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.573799 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bccc878-cbb0-4406-8bea-f1154caf451f" path="/var/lib/kubelet/pods/4bccc878-cbb0-4406-8bea-f1154caf451f/volumes" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.595354 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:07 crc kubenswrapper[4925]: I0121 11:34:07.744836 4925 prober.go:107] "Probe failed" probeType="Startup" pod="watcher-kuttl-default/cinder-backup-0" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 11:34:08 crc kubenswrapper[4925]: I0121 11:34:07.999593 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:08 crc kubenswrapper[4925]: W0121 11:34:08.021637 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57345c8f_262f_4a3d_812d_3e8c465a8216.slice/crio-5b3162df7d56f37e0f88f31ccdb6d41d7e23c32aaaa0430d9ec252bd8ad3a5ae WatchSource:0}: Error finding container 5b3162df7d56f37e0f88f31ccdb6d41d7e23c32aaaa0430d9ec252bd8ad3a5ae: Status 404 returned error can't find the container with id 5b3162df7d56f37e0f88f31ccdb6d41d7e23c32aaaa0430d9ec252bd8ad3a5ae Jan 21 11:34:08 crc kubenswrapper[4925]: I0121 11:34:08.787349 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"57345c8f-262f-4a3d-812d-3e8c465a8216","Type":"ContainerStarted","Data":"5b3162df7d56f37e0f88f31ccdb6d41d7e23c32aaaa0430d9ec252bd8ad3a5ae"} Jan 21 11:34:09 crc kubenswrapper[4925]: I0121 11:34:09.019795 4925 log.go:25] "Finished parsing 
log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:09 crc kubenswrapper[4925]: I0121 11:34:09.955771 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"57345c8f-262f-4a3d-812d-3e8c465a8216","Type":"ContainerStarted","Data":"c8a815bebc208c2f565ee345818ea5743202f1e3a5387978048ba369bb3dec61"} Jan 21 11:34:10 crc kubenswrapper[4925]: I0121 11:34:10.307200 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:11 crc kubenswrapper[4925]: I0121 11:34:11.576114 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:12 crc kubenswrapper[4925]: I0121 11:34:12.167114 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:12 crc kubenswrapper[4925]: I0121 11:34:12.240109 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:12 crc kubenswrapper[4925]: I0121 11:34:12.440470 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:12 crc kubenswrapper[4925]: I0121 11:34:12.499872 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:12 crc kubenswrapper[4925]: I0121 11:34:12.988535 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:13 crc kubenswrapper[4925]: I0121 11:34:13.150060 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"57345c8f-262f-4a3d-812d-3e8c465a8216","Type":"ContainerStarted","Data":"346f72749d51a646b3de207c9d05e310bb1b0e14acd1813725fe77eb29e75e1b"} Jan 21 11:34:13 crc kubenswrapper[4925]: I0121 11:34:13.150316 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-scheduler-0" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerName="cinder-scheduler" containerID="cri-o://199768c90bfcb14d2ca01b845675ec21543c3dc829be32a0b78c5c785b149792" gracePeriod=30 Jan 21 11:34:13 crc kubenswrapper[4925]: I0121 11:34:13.150448 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-scheduler-0" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerName="probe" containerID="cri-o://287b2b2b7dc7095f10b3169f1616bf028f06ebc0c095128d453405d28fbee65d" gracePeriod=30 Jan 21 11:34:13 crc kubenswrapper[4925]: I0121 11:34:13.150654 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-backup-0" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="cinder-backup" containerID="cri-o://bc7ef51c508dd0acd8517fbbc23a971d8f08aa8be247a991d19d153b946719d1" gracePeriod=30 Jan 21 11:34:13 crc kubenswrapper[4925]: I0121 11:34:13.150794 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-backup-0" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="probe" 
containerID="cri-o://aa8fdf2e122406e76d71e30d7ec801c40f579239474c8a0ffd03ab4b266d8046" gracePeriod=30 Jan 21 11:34:13 crc kubenswrapper[4925]: I0121 11:34:13.196589 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-api-0" podStartSLOduration=7.196545546 podStartE2EDuration="7.196545546s" podCreationTimestamp="2026-01-21 11:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:34:13.188055088 +0000 UTC m=+2344.791947032" watchObservedRunningTime="2026-01-21 11:34:13.196545546 +0000 UTC m=+2344.800437480" Jan 21 11:34:14 crc kubenswrapper[4925]: I0121 11:34:14.011262 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:14 crc kubenswrapper[4925]: I0121 11:34:14.011635 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="915b5d82-f42c-4046-9e7e-2581a6979377" containerName="watcher-decision-engine" containerID="cri-o://4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd" gracePeriod=30 Jan 21 11:34:14 crc kubenswrapper[4925]: I0121 11:34:14.159417 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:14 crc kubenswrapper[4925]: I0121 11:34:14.284663 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:15 crc kubenswrapper[4925]: I0121 11:34:15.197875 4925 generic.go:334] "Generic (PLEG): container finished" podID="80dfa626-dc13-44ac-ba64-0a70da810969" containerID="aa8fdf2e122406e76d71e30d7ec801c40f579239474c8a0ffd03ab4b266d8046" exitCode=0 Jan 21 11:34:15 crc kubenswrapper[4925]: I0121 11:34:15.198888 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"80dfa626-dc13-44ac-ba64-0a70da810969","Type":"ContainerDied","Data":"aa8fdf2e122406e76d71e30d7ec801c40f579239474c8a0ffd03ab4b266d8046"} Jan 21 11:34:15 crc kubenswrapper[4925]: I0121 11:34:15.775797 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:16 crc kubenswrapper[4925]: I0121 11:34:16.282667 4925 generic.go:334] "Generic (PLEG): container finished" podID="80dfa626-dc13-44ac-ba64-0a70da810969" containerID="bc7ef51c508dd0acd8517fbbc23a971d8f08aa8be247a991d19d153b946719d1" exitCode=0 Jan 21 11:34:16 crc kubenswrapper[4925]: I0121 11:34:16.282748 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"80dfa626-dc13-44ac-ba64-0a70da810969","Type":"ContainerDied","Data":"bc7ef51c508dd0acd8517fbbc23a971d8f08aa8be247a991d19d153b946719d1"} Jan 21 11:34:16 crc kubenswrapper[4925]: I0121 11:34:16.285485 4925 generic.go:334] "Generic (PLEG): container finished" podID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerID="287b2b2b7dc7095f10b3169f1616bf028f06ebc0c095128d453405d28fbee65d" exitCode=0 Jan 21 11:34:16 crc kubenswrapper[4925]: I0121 11:34:16.285522 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" 
event={"ID":"5b146d44-ea1d-4c52-99e3-c9a16124fd89","Type":"ContainerDied","Data":"287b2b2b7dc7095f10b3169f1616bf028f06ebc0c095128d453405d28fbee65d"} Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.107142 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216154 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-iscsi\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216266 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-cinder\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216316 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-nvme\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216368 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-sys\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216355 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216460 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216485 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lb8sm\" (UniqueName: \"kubernetes.io/projected/80dfa626-dc13-44ac-ba64-0a70da810969-kube-api-access-lb8sm\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216522 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-cinder" (OuterVolumeSpecName: "var-locks-cinder") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "var-locks-cinder". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216639 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-cert-memcached-mtls\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216748 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-lib-cinder\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216805 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-scripts\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216833 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data-custom\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216854 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216871 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-run\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216914 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-combined-ca-bundle\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216998 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-machine-id\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.217026 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-lib-modules\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.217046 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-dev\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 
crc kubenswrapper[4925]: I0121 11:34:17.217065 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-brick\") pod \"80dfa626-dc13-44ac-ba64-0a70da810969\" (UID: \"80dfa626-dc13-44ac-ba64-0a70da810969\") " Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.217372 4925 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.217385 4925 reconciler_common.go:293] "Volume detached for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-cinder\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.217415 4925 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.217444 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.217470 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-lib-cinder" (OuterVolumeSpecName: "var-lib-cinder") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "var-lib-cinder". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.216610 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-sys" (OuterVolumeSpecName: "sys") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.217902 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-run" (OuterVolumeSpecName: "run") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.219034 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.219085 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-dev" (OuterVolumeSpecName: "dev") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.219119 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.225381 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80dfa626-dc13-44ac-ba64-0a70da810969-kube-api-access-lb8sm" (OuterVolumeSpecName: "kube-api-access-lb8sm") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "kube-api-access-lb8sm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.227712 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-scripts" (OuterVolumeSpecName: "scripts") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.236944 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.310966 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"80dfa626-dc13-44ac-ba64-0a70da810969","Type":"ContainerDied","Data":"a6eb5c414bb0377208395f87a93d638e313721fcad04108ef0a9705b58d64013"} Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.312431 4925 scope.go:117] "RemoveContainer" containerID="aa8fdf2e122406e76d71e30d7ec801c40f579239474c8a0ffd03ab4b266d8046" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.313585 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321175 4925 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321228 4925 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321258 4925 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321278 4925 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-dev\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321298 4925 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-sys\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321310 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lb8sm\" (UniqueName: \"kubernetes.io/projected/80dfa626-dc13-44ac-ba64-0a70da810969-kube-api-access-lb8sm\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321324 4925 reconciler_common.go:293] "Volume detached for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-var-lib-cinder\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321353 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321378 4925 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.321410 4925 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/80dfa626-dc13-44ac-ba64-0a70da810969-run\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.362615 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.367751 4925 generic.go:334] "Generic (PLEG): container finished" podID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerID="199768c90bfcb14d2ca01b845675ec21543c3dc829be32a0b78c5c785b149792" exitCode=0 Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.367838 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"5b146d44-ea1d-4c52-99e3-c9a16124fd89","Type":"ContainerDied","Data":"199768c90bfcb14d2ca01b845675ec21543c3dc829be32a0b78c5c785b149792"} Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.418702 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.426815 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data" (OuterVolumeSpecName: "config-data") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.427058 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.498729 4925 scope.go:117] "RemoveContainer" containerID="bc7ef51c508dd0acd8517fbbc23a971d8f08aa8be247a991d19d153b946719d1" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.519622 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "80dfa626-dc13-44ac-ba64-0a70da810969" (UID: "80dfa626-dc13-44ac-ba64-0a70da810969"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.538099 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.538149 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dfa626-dc13-44ac-ba64-0a70da810969-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.785953 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.819513 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.851064 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:17 crc kubenswrapper[4925]: E0121 11:34:17.851813 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="probe" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.851837 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="probe" Jan 21 11:34:17 crc kubenswrapper[4925]: E0121 11:34:17.851862 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="cinder-backup" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.851870 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="cinder-backup" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.852107 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="cinder-backup" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.852168 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" containerName="probe" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.853483 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.857980 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-backup-config-data" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.889968 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899239 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899361 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-lib-modules\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899440 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-sys\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899505 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899556 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899606 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899662 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899697 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 
crc kubenswrapper[4925]: I0121 11:34:17.899755 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d82nl\" (UniqueName: \"kubernetes.io/projected/bce0e839-af46-4bcc-a21a-91968ec1ace7-kube-api-access-d82nl\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899797 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-dev\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899878 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-nvme\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899914 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data-custom\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.899953 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-scripts\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.902521 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.902613 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.902681 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-run\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:17 crc kubenswrapper[4925]: I0121 11:34:17.987200 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.004571 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-cert-memcached-mtls\") pod \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.004686 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data\") pod \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.004736 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data-custom\") pod \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.004754 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-scripts\") pod \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.004859 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5b146d44-ea1d-4c52-99e3-c9a16124fd89-etc-machine-id\") pod \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.004885 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-combined-ca-bundle\") pod \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.004923 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qb84j\" (UniqueName: \"kubernetes.io/projected/5b146d44-ea1d-4c52-99e3-c9a16124fd89-kube-api-access-qb84j\") pod \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\" (UID: \"5b146d44-ea1d-4c52-99e3-c9a16124fd89\") " Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005238 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-nvme\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005283 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data-custom\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005326 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-scripts\") pod 
\"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005359 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005422 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005461 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-run\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005500 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005535 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-lib-modules\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005555 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-sys\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005579 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005611 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005639 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005663 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005689 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005715 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d82nl\" (UniqueName: \"kubernetes.io/projected/bce0e839-af46-4bcc-a21a-91968ec1ace7-kube-api-access-d82nl\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005742 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-dev\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.005844 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-dev\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.007166 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-run\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.008026 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-nvme\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.008231 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b146d44-ea1d-4c52-99e3-c9a16124fd89-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5b146d44-ea1d-4c52-99e3-c9a16124fd89" (UID: "5b146d44-ea1d-4c52-99e3-c9a16124fd89"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.010227 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.012696 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5b146d44-ea1d-4c52-99e3-c9a16124fd89" (UID: "5b146d44-ea1d-4c52-99e3-c9a16124fd89"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.013905 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.013979 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.014058 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-lib-modules\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.014134 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.014167 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-sys\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.014219 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.029372 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b146d44-ea1d-4c52-99e3-c9a16124fd89-kube-api-access-qb84j" (OuterVolumeSpecName: "kube-api-access-qb84j") pod "5b146d44-ea1d-4c52-99e3-c9a16124fd89" (UID: "5b146d44-ea1d-4c52-99e3-c9a16124fd89"). InnerVolumeSpecName "kube-api-access-qb84j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.031761 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-scripts" (OuterVolumeSpecName: "scripts") pod "5b146d44-ea1d-4c52-99e3-c9a16124fd89" (UID: "5b146d44-ea1d-4c52-99e3-c9a16124fd89"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.032439 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.040218 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data-custom\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.041914 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-cert-memcached-mtls\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.047161 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-scripts\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.048168 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d82nl\" (UniqueName: \"kubernetes.io/projected/bce0e839-af46-4bcc-a21a-91968ec1ace7-kube-api-access-d82nl\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.049120 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data\") pod \"cinder-backup-0\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.107840 4925 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.107891 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.107908 4925 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5b146d44-ea1d-4c52-99e3-c9a16124fd89-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.107919 4925 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qb84j\" (UniqueName: \"kubernetes.io/projected/5b146d44-ea1d-4c52-99e3-c9a16124fd89-kube-api-access-qb84j\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.108457 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b146d44-ea1d-4c52-99e3-c9a16124fd89" (UID: "5b146d44-ea1d-4c52-99e3-c9a16124fd89"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.191577 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data" (OuterVolumeSpecName: "config-data") pod "5b146d44-ea1d-4c52-99e3-c9a16124fd89" (UID: "5b146d44-ea1d-4c52-99e3-c9a16124fd89"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.199672 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.217218 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.217278 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.290771 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.291623 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="ceilometer-central-agent" containerID="cri-o://5dd9edb1ebc8474107f1a3ef3fa74372643f26baeffc5135e86c9aa096873962" gracePeriod=30 Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.292446 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="proxy-httpd" containerID="cri-o://7c92769a060e35c49f2a65bf4eb5e49852a9881d526d341ac6fad7909c08b41e" gracePeriod=30 Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.292573 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="sg-core" containerID="cri-o://102fece7e50a90dd51662a11d44186fed5562a7b71ad6c10468170ce07d7709b" gracePeriod=30 Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.292531 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="ceilometer-notification-agent" containerID="cri-o://4c83e756039b53a4cfb1589d92b4edc6a5513b7d52cee54c440be76fa516a59c" gracePeriod=30 Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.459303 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "5b146d44-ea1d-4c52-99e3-c9a16124fd89" (UID: "5b146d44-ea1d-4c52-99e3-c9a16124fd89"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.465622 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"5b146d44-ea1d-4c52-99e3-c9a16124fd89","Type":"ContainerDied","Data":"bbd9cb292cf82b987cf43b84ca66a5813a4fa3f1ce8c85d86fa90e482dd22bd5"} Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.465908 4925 scope.go:117] "RemoveContainer" containerID="287b2b2b7dc7095f10b3169f1616bf028f06ebc0c095128d453405d28fbee65d" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.466123 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.504046 4925 scope.go:117] "RemoveContainer" containerID="199768c90bfcb14d2ca01b845675ec21543c3dc829be32a0b78c5c785b149792" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.524431 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.532891 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.541587 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5b146d44-ea1d-4c52-99e3-c9a16124fd89-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.563704 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:18 crc kubenswrapper[4925]: E0121 11:34:18.564326 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerName="cinder-scheduler" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.564433 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerName="cinder-scheduler" Jan 21 11:34:18 crc kubenswrapper[4925]: E0121 11:34:18.565230 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerName="probe" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.565300 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerName="probe" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.565609 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerName="cinder-scheduler" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.565687 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" containerName="probe" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.567014 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.576680 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.577881 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cinder-scheduler-config-data" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.646696 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.646757 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.646836 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-scripts\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.646970 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/89909e4b-c27f-4cb5-817c-1c3d789b07d7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.647017 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.647167 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx8vm\" (UniqueName: \"kubernetes.io/projected/89909e4b-c27f-4cb5-817c-1c3d789b07d7-kube-api-access-bx8vm\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.647292 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.749068 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-scripts\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " 
pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.749479 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/89909e4b-c27f-4cb5-817c-1c3d789b07d7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.749524 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.749582 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx8vm\" (UniqueName: \"kubernetes.io/projected/89909e4b-c27f-4cb5-817c-1c3d789b07d7-kube-api-access-bx8vm\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.749628 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.749649 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.749676 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.749930 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/89909e4b-c27f-4cb5-817c-1c3d789b07d7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.759189 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.759290 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-cert-memcached-mtls\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.763479 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.766808 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.770244 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-scripts\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.770900 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bx8vm\" (UniqueName: \"kubernetes.io/projected/89909e4b-c27f-4cb5-817c-1c3d789b07d7-kube-api-access-bx8vm\") pod \"cinder-scheduler-0\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:18 crc kubenswrapper[4925]: I0121 11:34:18.790210 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.011199 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.210673 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:19 crc kubenswrapper[4925]: W0121 11:34:19.221771 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbce0e839_af46_4bcc_a21a_91968ec1ace7.slice/crio-4f21d7f74c3d1a9dd086b832223663c60c64ca87fcae44420fd84439391714d5 WatchSource:0}: Error finding container 4f21d7f74c3d1a9dd086b832223663c60c64ca87fcae44420fd84439391714d5: Status 404 returned error can't find the container with id 4f21d7f74c3d1a9dd086b832223663c60c64ca87fcae44420fd84439391714d5 Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.571105 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b146d44-ea1d-4c52-99e3-c9a16124fd89" path="/var/lib/kubelet/pods/5b146d44-ea1d-4c52-99e3-c9a16124fd89/volumes" Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.572173 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80dfa626-dc13-44ac-ba64-0a70da810969" path="/var/lib/kubelet/pods/80dfa626-dc13-44ac-ba64-0a70da810969/volumes" Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.575767 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"bce0e839-af46-4bcc-a21a-91968ec1ace7","Type":"ContainerStarted","Data":"4f21d7f74c3d1a9dd086b832223663c60c64ca87fcae44420fd84439391714d5"} Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.601164 4925 generic.go:334] "Generic (PLEG): container finished" podID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerID="7c92769a060e35c49f2a65bf4eb5e49852a9881d526d341ac6fad7909c08b41e" exitCode=0 Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.601221 4925 generic.go:334] "Generic (PLEG): container finished" podID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerID="102fece7e50a90dd51662a11d44186fed5562a7b71ad6c10468170ce07d7709b" exitCode=2 Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.601342 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerDied","Data":"7c92769a060e35c49f2a65bf4eb5e49852a9881d526d341ac6fad7909c08b41e"} Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.601387 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerDied","Data":"102fece7e50a90dd51662a11d44186fed5562a7b71ad6c10468170ce07d7709b"} Jan 21 11:34:19 crc kubenswrapper[4925]: I0121 11:34:19.659010 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.244571 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_915b5d82-f42c-4046-9e7e-2581a6979377/watcher-decision-engine/0.log" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.553254 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.731719 4925 generic.go:334] "Generic (PLEG): container finished" podID="915b5d82-f42c-4046-9e7e-2581a6979377" containerID="4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd" exitCode=0 Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.731863 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.731878 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"915b5d82-f42c-4046-9e7e-2581a6979377","Type":"ContainerDied","Data":"4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd"} Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.731920 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"915b5d82-f42c-4046-9e7e-2581a6979377","Type":"ContainerDied","Data":"5b5765359829282e3055260cccbb76ef1f1bd3dbf1bc4e47621ed9a8bd7e84d7"} Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.731943 4925 scope.go:117] "RemoveContainer" containerID="4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.748016 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-combined-ca-bundle\") pod \"915b5d82-f42c-4046-9e7e-2581a6979377\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.748112 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-custom-prometheus-ca\") pod \"915b5d82-f42c-4046-9e7e-2581a6979377\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.748139 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n28tq\" (UniqueName: \"kubernetes.io/projected/915b5d82-f42c-4046-9e7e-2581a6979377-kube-api-access-n28tq\") pod \"915b5d82-f42c-4046-9e7e-2581a6979377\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.748179 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-config-data\") pod \"915b5d82-f42c-4046-9e7e-2581a6979377\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.748245 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-cert-memcached-mtls\") pod \"915b5d82-f42c-4046-9e7e-2581a6979377\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.748306 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/915b5d82-f42c-4046-9e7e-2581a6979377-logs\") pod \"915b5d82-f42c-4046-9e7e-2581a6979377\" (UID: \"915b5d82-f42c-4046-9e7e-2581a6979377\") " Jan 21 11:34:20 crc kubenswrapper[4925]: 
I0121 11:34:20.758199 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/915b5d82-f42c-4046-9e7e-2581a6979377-logs" (OuterVolumeSpecName: "logs") pod "915b5d82-f42c-4046-9e7e-2581a6979377" (UID: "915b5d82-f42c-4046-9e7e-2581a6979377"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.763727 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/915b5d82-f42c-4046-9e7e-2581a6979377-kube-api-access-n28tq" (OuterVolumeSpecName: "kube-api-access-n28tq") pod "915b5d82-f42c-4046-9e7e-2581a6979377" (UID: "915b5d82-f42c-4046-9e7e-2581a6979377"). InnerVolumeSpecName "kube-api-access-n28tq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.764139 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"89909e4b-c27f-4cb5-817c-1c3d789b07d7","Type":"ContainerStarted","Data":"85d4acc8676179ffa1ada040da588556deacdfc0cee1f70f22796500b7b6fa52"} Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.785155 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"bce0e839-af46-4bcc-a21a-91968ec1ace7","Type":"ContainerStarted","Data":"42b9bb1217ad33dfc895a34e7e1a745d8ab928a53f80c62aa34112aedbd0cba8"} Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.789916 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "915b5d82-f42c-4046-9e7e-2581a6979377" (UID: "915b5d82-f42c-4046-9e7e-2581a6979377"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.807723 4925 scope.go:117] "RemoveContainer" containerID="4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.808038 4925 generic.go:334] "Generic (PLEG): container finished" podID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerID="5dd9edb1ebc8474107f1a3ef3fa74372643f26baeffc5135e86c9aa096873962" exitCode=0 Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.808109 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerDied","Data":"5dd9edb1ebc8474107f1a3ef3fa74372643f26baeffc5135e86c9aa096873962"} Jan 21 11:34:20 crc kubenswrapper[4925]: E0121 11:34:20.808324 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd\": container with ID starting with 4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd not found: ID does not exist" containerID="4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.808377 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd"} err="failed to get container status \"4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd\": rpc error: code = NotFound desc = could not find container \"4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd\": container with ID starting with 4ff7ce4b2b5c91561b927d3ddff26be6830c443c0ba2fffed5ab55ee043d90fd not found: ID does not exist" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.831691 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "915b5d82-f42c-4046-9e7e-2581a6979377" (UID: "915b5d82-f42c-4046-9e7e-2581a6979377"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.921534 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/915b5d82-f42c-4046-9e7e-2581a6979377-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.921597 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.921633 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.921666 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n28tq\" (UniqueName: \"kubernetes.io/projected/915b5d82-f42c-4046-9e7e-2581a6979377-kube-api-access-n28tq\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.969580 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-backup-0" podStartSLOduration=3.969547252 podStartE2EDuration="3.969547252s" podCreationTimestamp="2026-01-21 11:34:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:34:20.825546401 +0000 UTC m=+2352.429438355" watchObservedRunningTime="2026-01-21 11:34:20.969547252 +0000 UTC m=+2352.573439186" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.979636 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-config-data" (OuterVolumeSpecName: "config-data") pod "915b5d82-f42c-4046-9e7e-2581a6979377" (UID: "915b5d82-f42c-4046-9e7e-2581a6979377"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:20 crc kubenswrapper[4925]: I0121 11:34:20.989718 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "915b5d82-f42c-4046-9e7e-2581a6979377" (UID: "915b5d82-f42c-4046-9e7e-2581a6979377"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.025766 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.026626 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/915b5d82-f42c-4046-9e7e-2581a6979377-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.276817 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.296504 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.317981 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:21 crc kubenswrapper[4925]: E0121 11:34:21.318456 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="915b5d82-f42c-4046-9e7e-2581a6979377" containerName="watcher-decision-engine" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.318475 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="915b5d82-f42c-4046-9e7e-2581a6979377" containerName="watcher-decision-engine" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.326272 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="915b5d82-f42c-4046-9e7e-2581a6979377" containerName="watcher-decision-engine" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.327154 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.332247 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6wst\" (UniqueName: \"kubernetes.io/projected/eb5761b2-6757-4aaf-8ecf-5de81cf90845-kube-api-access-f6wst\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.332324 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.332386 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.332444 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eb5761b2-6757-4aaf-8ecf-5de81cf90845-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.332537 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.332636 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.342638 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.348358 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.434417 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6wst\" (UniqueName: \"kubernetes.io/projected/eb5761b2-6757-4aaf-8ecf-5de81cf90845-kube-api-access-f6wst\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.434945 4925 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.435013 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.435054 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eb5761b2-6757-4aaf-8ecf-5de81cf90845-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.435131 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.435208 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.443363 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eb5761b2-6757-4aaf-8ecf-5de81cf90845-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.446067 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.446990 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.457793 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: 
I0121 11:34:21.458495 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.469326 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6wst\" (UniqueName: \"kubernetes.io/projected/eb5761b2-6757-4aaf-8ecf-5de81cf90845-kube-api-access-f6wst\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.514243 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="915b5d82-f42c-4046-9e7e-2581a6979377" path="/var/lib/kubelet/pods/915b5d82-f42c-4046-9e7e-2581a6979377/volumes" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.653612 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.840156 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"89909e4b-c27f-4cb5-817c-1c3d789b07d7","Type":"ContainerStarted","Data":"1847043b20c9cf78c82826a612b2fd1617792991fdade6bd26cd838a9a78b6ac"} Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.862169 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"bce0e839-af46-4bcc-a21a-91968ec1ace7","Type":"ContainerStarted","Data":"b27796d0dfc73426be7c8bdc5292b9d16905a6991ae8c4727f6037a52c0fe090"} Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.918689 4925 generic.go:334] "Generic (PLEG): container finished" podID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerID="4c83e756039b53a4cfb1589d92b4edc6a5513b7d52cee54c440be76fa516a59c" exitCode=0 Jan 21 11:34:21 crc kubenswrapper[4925]: I0121 11:34:21.918983 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerDied","Data":"4c83e756039b53a4cfb1589d92b4edc6a5513b7d52cee54c440be76fa516a59c"} Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.295718 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/cinder-api-0" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.194:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.509541 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.541020 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.689062 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvlv9\" (UniqueName: \"kubernetes.io/projected/3ccb7b10-6925-40de-8856-172fc9ad9077-kube-api-access-tvlv9\") pod \"3ccb7b10-6925-40de-8856-172fc9ad9077\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.689158 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-ceilometer-tls-certs\") pod \"3ccb7b10-6925-40de-8856-172fc9ad9077\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.689247 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-sg-core-conf-yaml\") pod \"3ccb7b10-6925-40de-8856-172fc9ad9077\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.689297 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-run-httpd\") pod \"3ccb7b10-6925-40de-8856-172fc9ad9077\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.689354 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-log-httpd\") pod \"3ccb7b10-6925-40de-8856-172fc9ad9077\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.689533 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-scripts\") pod \"3ccb7b10-6925-40de-8856-172fc9ad9077\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.689567 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-combined-ca-bundle\") pod \"3ccb7b10-6925-40de-8856-172fc9ad9077\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.689599 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-config-data\") pod \"3ccb7b10-6925-40de-8856-172fc9ad9077\" (UID: \"3ccb7b10-6925-40de-8856-172fc9ad9077\") " Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.691119 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3ccb7b10-6925-40de-8856-172fc9ad9077" (UID: "3ccb7b10-6925-40de-8856-172fc9ad9077"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.692088 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3ccb7b10-6925-40de-8856-172fc9ad9077" (UID: "3ccb7b10-6925-40de-8856-172fc9ad9077"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.697992 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-scripts" (OuterVolumeSpecName: "scripts") pod "3ccb7b10-6925-40de-8856-172fc9ad9077" (UID: "3ccb7b10-6925-40de-8856-172fc9ad9077"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.698440 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ccb7b10-6925-40de-8856-172fc9ad9077-kube-api-access-tvlv9" (OuterVolumeSpecName: "kube-api-access-tvlv9") pod "3ccb7b10-6925-40de-8856-172fc9ad9077" (UID: "3ccb7b10-6925-40de-8856-172fc9ad9077"). InnerVolumeSpecName "kube-api-access-tvlv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.764680 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3ccb7b10-6925-40de-8856-172fc9ad9077" (UID: "3ccb7b10-6925-40de-8856-172fc9ad9077"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.791918 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvlv9\" (UniqueName: \"kubernetes.io/projected/3ccb7b10-6925-40de-8856-172fc9ad9077-kube-api-access-tvlv9\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.792220 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.792407 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.792484 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccb7b10-6925-40de-8856-172fc9ad9077-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.792550 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.804728 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3ccb7b10-6925-40de-8856-172fc9ad9077" (UID: "3ccb7b10-6925-40de-8856-172fc9ad9077"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:22 crc kubenswrapper[4925]: I0121 11:34:22.846563 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ccb7b10-6925-40de-8856-172fc9ad9077" (UID: "3ccb7b10-6925-40de-8856-172fc9ad9077"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.028986 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.029049 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.067098 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3ccb7b10-6925-40de-8856-172fc9ad9077","Type":"ContainerDied","Data":"1b3e331f3a3b8a4b1dea3a4dd9ba5a1ebc171deb16e7df0c62db1161544ca0a3"} Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.068545 4925 scope.go:117] "RemoveContainer" containerID="7c92769a060e35c49f2a65bf4eb5e49852a9881d526d341ac6fad7909c08b41e" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.068731 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.082649 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"eb5761b2-6757-4aaf-8ecf-5de81cf90845","Type":"ContainerStarted","Data":"81700ca1086dd46af2104c3065d557ff46ef374e406ed0ae5bc5c864b4a6f004"} Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.108929 4925 scope.go:117] "RemoveContainer" containerID="102fece7e50a90dd51662a11d44186fed5562a7b71ad6c10468170ce07d7709b" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.122635 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-config-data" (OuterVolumeSpecName: "config-data") pod "3ccb7b10-6925-40de-8856-172fc9ad9077" (UID: "3ccb7b10-6925-40de-8856-172fc9ad9077"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.145502 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccb7b10-6925-40de-8856-172fc9ad9077-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.202084 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.251764 4925 scope.go:117] "RemoveContainer" containerID="4c83e756039b53a4cfb1589d92b4edc6a5513b7d52cee54c440be76fa516a59c" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.286034 4925 scope.go:117] "RemoveContainer" containerID="5dd9edb1ebc8474107f1a3ef3fa74372643f26baeffc5135e86c9aa096873962" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.447276 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.460560 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.482745 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:23 crc kubenswrapper[4925]: E0121 11:34:23.491722 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="ceilometer-central-agent" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.491770 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="ceilometer-central-agent" Jan 21 11:34:23 crc kubenswrapper[4925]: E0121 11:34:23.491798 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="ceilometer-notification-agent" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.491805 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="ceilometer-notification-agent" Jan 21 11:34:23 crc kubenswrapper[4925]: E0121 11:34:23.491860 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="sg-core" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.491868 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="sg-core" Jan 21 11:34:23 crc kubenswrapper[4925]: E0121 11:34:23.491892 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="proxy-httpd" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.491898 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="proxy-httpd" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.492678 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="ceilometer-central-agent" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.492711 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="ceilometer-notification-agent" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.492734 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" 
containerName="sg-core" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.492756 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" containerName="proxy-httpd" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.518492 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.592615 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-run-httpd\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.809423 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6x4n\" (UniqueName: \"kubernetes.io/projected/f296c8cc-70c9-4e34-80ef-1741d76e96ee-kube-api-access-q6x4n\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.809605 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.809653 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.809784 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-config-data\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.809919 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.809996 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-log-httpd\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.810034 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-scripts\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.826291 
4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.826688 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.827763 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.859951 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ccb7b10-6925-40de-8856-172fc9ad9077" path="/var/lib/kubelet/pods/3ccb7b10-6925-40de-8856-172fc9ad9077/volumes" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.869001 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.919813 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.919950 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-log-httpd\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.919986 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-scripts\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.920083 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-run-httpd\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.920147 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6x4n\" (UniqueName: \"kubernetes.io/projected/f296c8cc-70c9-4e34-80ef-1741d76e96ee-kube-api-access-q6x4n\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.920210 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.920255 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.920320 4925 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-config-data\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.927257 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-run-httpd\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.933300 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-log-httpd\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.934540 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-config-data\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.942260 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.946947 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.950478 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-scripts\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.959466 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:23 crc kubenswrapper[4925]: I0121 11:34:23.980454 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6x4n\" (UniqueName: \"kubernetes.io/projected/f296c8cc-70c9-4e34-80ef-1741d76e96ee-kube-api-access-q6x4n\") pod \"ceilometer-0\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:24 crc kubenswrapper[4925]: I0121 11:34:24.094948 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"89909e4b-c27f-4cb5-817c-1c3d789b07d7","Type":"ContainerStarted","Data":"c812d7e7df3fcaa9f2a28b162cae8fa85d2294e82b636e6597cb913c48e97b27"} Jan 21 11:34:24 crc kubenswrapper[4925]: I0121 11:34:24.097040 4925 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"eb5761b2-6757-4aaf-8ecf-5de81cf90845","Type":"ContainerStarted","Data":"ca16500b733b5186298e744deaf79e1611286bdda6fd8e8c280cd500a527f15a"} Jan 21 11:34:24 crc kubenswrapper[4925]: I0121 11:34:24.171131 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:24 crc kubenswrapper[4925]: I0121 11:34:24.191090 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=3.191064387 podStartE2EDuration="3.191064387s" podCreationTimestamp="2026-01-21 11:34:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:34:24.188963691 +0000 UTC m=+2355.792855635" watchObservedRunningTime="2026-01-21 11:34:24.191064387 +0000 UTC m=+2355.794956321" Jan 21 11:34:24 crc kubenswrapper[4925]: I0121 11:34:24.191984 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinder-scheduler-0" podStartSLOduration=6.191972176 podStartE2EDuration="6.191972176s" podCreationTimestamp="2026-01-21 11:34:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:34:24.151765468 +0000 UTC m=+2355.755657412" watchObservedRunningTime="2026-01-21 11:34:24.191972176 +0000 UTC m=+2355.795864110" Jan 21 11:34:24 crc kubenswrapper[4925]: I0121 11:34:24.269636 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="watcher-kuttl-default/cinder-api-0" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.194:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 21 11:34:24 crc kubenswrapper[4925]: I0121 11:34:24.893089 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:25 crc kubenswrapper[4925]: I0121 11:34:25.110638 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerStarted","Data":"5a0a170d9b17155c40e968ff7e68150786f34dcf5aea2de2e89b598d4275b0d0"} Jan 21 11:34:25 crc kubenswrapper[4925]: I0121 11:34:25.440928 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:26 crc kubenswrapper[4925]: I0121 11:34:26.324257 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerStarted","Data":"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562"} Jan 21 11:34:26 crc kubenswrapper[4925]: I0121 11:34:26.686703 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:27 crc kubenswrapper[4925]: I0121 11:34:27.202127 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:27 crc kubenswrapper[4925]: I0121 11:34:27.345977 4925 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerStarted","Data":"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24"} Jan 21 11:34:28 crc kubenswrapper[4925]: I0121 11:34:28.027704 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:28 crc kubenswrapper[4925]: I0121 11:34:28.490064 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerStarted","Data":"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc"} Jan 21 11:34:28 crc kubenswrapper[4925]: I0121 11:34:28.828296 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:29 crc kubenswrapper[4925]: I0121 11:34:29.034117 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:29 crc kubenswrapper[4925]: I0121 11:34:29.367341 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="watcher-kuttl-default/cinder-api-0" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.194:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 21 11:34:29 crc kubenswrapper[4925]: I0121 11:34:29.523970 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:29 crc kubenswrapper[4925]: I0121 11:34:29.725476 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:30 crc kubenswrapper[4925]: I0121 11:34:30.685198 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerStarted","Data":"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1"} Jan 21 11:34:30 crc kubenswrapper[4925]: I0121 11:34:30.685627 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:30 crc kubenswrapper[4925]: I0121 11:34:30.924665 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:31 crc kubenswrapper[4925]: I0121 11:34:31.654705 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:31 crc kubenswrapper[4925]: I0121 11:34:31.691078 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:31 crc kubenswrapper[4925]: I0121 11:34:31.697492 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:31 crc kubenswrapper[4925]: I0121 11:34:31.725856 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=4.306175088 podStartE2EDuration="8.725832431s" podCreationTimestamp="2026-01-21 
11:34:23 +0000 UTC" firstStartedPulling="2026-01-21 11:34:24.925867806 +0000 UTC m=+2356.529759750" lastFinishedPulling="2026-01-21 11:34:29.345525149 +0000 UTC m=+2360.949417093" observedRunningTime="2026-01-21 11:34:30.730450436 +0000 UTC m=+2362.334342390" watchObservedRunningTime="2026-01-21 11:34:31.725832431 +0000 UTC m=+2363.329724355" Jan 21 11:34:31 crc kubenswrapper[4925]: I0121 11:34:31.855276 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:32 crc kubenswrapper[4925]: I0121 11:34:32.328016 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:33 crc kubenswrapper[4925]: I0121 11:34:33.569571 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.024906 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.137954 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-db-sync-8mp9b"] Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.149507 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-db-sync-8mp9b"] Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.180775 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.181173 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-backup-0" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerName="cinder-backup" containerID="cri-o://42b9bb1217ad33dfc895a34e7e1a745d8ab928a53f80c62aa34112aedbd0cba8" gracePeriod=30 Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.181288 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-backup-0" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerName="probe" containerID="cri-o://b27796d0dfc73426be7c8bdc5292b9d16905a6991ae8c4727f6037a52c0fe090" gracePeriod=30 Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.220094 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/cinderf1be-account-delete-wzlrr"] Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.221963 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.226775 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.227091 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-scheduler-0" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerName="cinder-scheduler" containerID="cri-o://1847043b20c9cf78c82826a612b2fd1617792991fdade6bd26cd838a9a78b6ac" gracePeriod=30 Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.227206 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-scheduler-0" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerName="probe" containerID="cri-o://c812d7e7df3fcaa9f2a28b162cae8fa85d2294e82b636e6597cb913c48e97b27" gracePeriod=30 Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.252424 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.252792 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-api-0" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api-log" containerID="cri-o://c8a815bebc208c2f565ee345818ea5743202f1e3a5387978048ba369bb3dec61" gracePeriod=30 Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.252879 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/cinder-api-0" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api" containerID="cri-o://346f72749d51a646b3de207c9d05e310bb1b0e14acd1813725fe77eb29e75e1b" gracePeriod=30 Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.290292 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinderf1be-account-delete-wzlrr"] Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.293332 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h9pq\" (UniqueName: \"kubernetes.io/projected/9dbf7f31-122f-4e64-ae7c-5669badd697e-kube-api-access-6h9pq\") pod \"cinderf1be-account-delete-wzlrr\" (UID: \"9dbf7f31-122f-4e64-ae7c-5669badd697e\") " pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.293786 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dbf7f31-122f-4e64-ae7c-5669badd697e-operator-scripts\") pod \"cinderf1be-account-delete-wzlrr\" (UID: \"9dbf7f31-122f-4e64-ae7c-5669badd697e\") " pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.404813 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6h9pq\" (UniqueName: \"kubernetes.io/projected/9dbf7f31-122f-4e64-ae7c-5669badd697e-kube-api-access-6h9pq\") pod \"cinderf1be-account-delete-wzlrr\" (UID: \"9dbf7f31-122f-4e64-ae7c-5669badd697e\") " pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.404973 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/9dbf7f31-122f-4e64-ae7c-5669badd697e-operator-scripts\") pod \"cinderf1be-account-delete-wzlrr\" (UID: \"9dbf7f31-122f-4e64-ae7c-5669badd697e\") " pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.407101 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dbf7f31-122f-4e64-ae7c-5669badd697e-operator-scripts\") pod \"cinderf1be-account-delete-wzlrr\" (UID: \"9dbf7f31-122f-4e64-ae7c-5669badd697e\") " pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.455522 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6h9pq\" (UniqueName: \"kubernetes.io/projected/9dbf7f31-122f-4e64-ae7c-5669badd697e-kube-api-access-6h9pq\") pod \"cinderf1be-account-delete-wzlrr\" (UID: \"9dbf7f31-122f-4e64-ae7c-5669badd697e\") " pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.567480 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.913245 4925 generic.go:334] "Generic (PLEG): container finished" podID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerID="c8a815bebc208c2f565ee345818ea5743202f1e3a5387978048ba369bb3dec61" exitCode=143 Jan 21 11:34:34 crc kubenswrapper[4925]: I0121 11:34:34.913623 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"57345c8f-262f-4a3d-812d-3e8c465a8216","Type":"ContainerDied","Data":"c8a815bebc208c2f565ee345818ea5743202f1e3a5387978048ba369bb3dec61"} Jan 21 11:34:35 crc kubenswrapper[4925]: I0121 11:34:35.335688 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:35 crc kubenswrapper[4925]: I0121 11:34:35.541299 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e843ee1-28fe-459d-9dd7-4a8b41127812" path="/var/lib/kubelet/pods/2e843ee1-28fe-459d-9dd7-4a8b41127812/volumes" Jan 21 11:34:35 crc kubenswrapper[4925]: I0121 11:34:35.608349 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/cinderf1be-account-delete-wzlrr"] Jan 21 11:34:35 crc kubenswrapper[4925]: I0121 11:34:35.924002 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" event={"ID":"9dbf7f31-122f-4e64-ae7c-5669badd697e","Type":"ContainerStarted","Data":"fd6e8e573247080f6fe03b76cf18c5be91ba1a5482d626abb6aeaca163ba27f5"} Jan 21 11:34:35 crc kubenswrapper[4925]: I0121 11:34:35.924063 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" event={"ID":"9dbf7f31-122f-4e64-ae7c-5669badd697e","Type":"ContainerStarted","Data":"2344a0f680535f44e33df8dd5565215ebce2e596e9dc5524e1df3a5377129153"} Jan 21 11:34:35 crc kubenswrapper[4925]: I0121 11:34:35.963911 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" podStartSLOduration=1.9638848580000001 podStartE2EDuration="1.963884858s" podCreationTimestamp="2026-01-21 11:34:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:34:35.959148629 +0000 UTC m=+2367.563040563" watchObservedRunningTime="2026-01-21 11:34:35.963884858 +0000 UTC m=+2367.567776792" Jan 21 11:34:36 crc kubenswrapper[4925]: I0121 11:34:36.723131 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:36 crc kubenswrapper[4925]: I0121 11:34:36.936987 4925 generic.go:334] "Generic (PLEG): container finished" podID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerID="b27796d0dfc73426be7c8bdc5292b9d16905a6991ae8c4727f6037a52c0fe090" exitCode=0 Jan 21 11:34:36 crc kubenswrapper[4925]: I0121 11:34:36.937079 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"bce0e839-af46-4bcc-a21a-91968ec1ace7","Type":"ContainerDied","Data":"b27796d0dfc73426be7c8bdc5292b9d16905a6991ae8c4727f6037a52c0fe090"} Jan 21 11:34:36 crc kubenswrapper[4925]: I0121 11:34:36.939291 4925 generic.go:334] "Generic (PLEG): container finished" podID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerID="c812d7e7df3fcaa9f2a28b162cae8fa85d2294e82b636e6597cb913c48e97b27" exitCode=0 Jan 21 11:34:36 crc kubenswrapper[4925]: I0121 11:34:36.939358 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"89909e4b-c27f-4cb5-817c-1c3d789b07d7","Type":"ContainerDied","Data":"c812d7e7df3fcaa9f2a28b162cae8fa85d2294e82b636e6597cb913c48e97b27"} Jan 21 11:34:36 crc kubenswrapper[4925]: I0121 11:34:36.941640 4925 generic.go:334] "Generic (PLEG): container finished" podID="9dbf7f31-122f-4e64-ae7c-5669badd697e" containerID="fd6e8e573247080f6fe03b76cf18c5be91ba1a5482d626abb6aeaca163ba27f5" exitCode=0 Jan 21 11:34:36 crc kubenswrapper[4925]: I0121 11:34:36.941714 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" event={"ID":"9dbf7f31-122f-4e64-ae7c-5669badd697e","Type":"ContainerDied","Data":"fd6e8e573247080f6fe03b76cf18c5be91ba1a5482d626abb6aeaca163ba27f5"} Jan 21 11:34:37 crc kubenswrapper[4925]: I0121 11:34:37.962750 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.104552 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/cinder-api-0" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.194:8776/healthcheck\": read tcp 10.217.0.2:32882->10.217.0.194:8776: read: connection reset by peer" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.578745 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.760526 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6h9pq\" (UniqueName: \"kubernetes.io/projected/9dbf7f31-122f-4e64-ae7c-5669badd697e-kube-api-access-6h9pq\") pod \"9dbf7f31-122f-4e64-ae7c-5669badd697e\" (UID: \"9dbf7f31-122f-4e64-ae7c-5669badd697e\") " Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.761138 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dbf7f31-122f-4e64-ae7c-5669badd697e-operator-scripts\") pod \"9dbf7f31-122f-4e64-ae7c-5669badd697e\" (UID: \"9dbf7f31-122f-4e64-ae7c-5669badd697e\") " Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.762164 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dbf7f31-122f-4e64-ae7c-5669badd697e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9dbf7f31-122f-4e64-ae7c-5669badd697e" (UID: "9dbf7f31-122f-4e64-ae7c-5669badd697e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.762723 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dbf7f31-122f-4e64-ae7c-5669badd697e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.767738 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.768244 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="eb5761b2-6757-4aaf-8ecf-5de81cf90845" containerName="watcher-decision-engine" containerID="cri-o://ca16500b733b5186298e744deaf79e1611286bdda6fd8e8c280cd500a527f15a" gracePeriod=30 Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.821934 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dbf7f31-122f-4e64-ae7c-5669badd697e-kube-api-access-6h9pq" (OuterVolumeSpecName: "kube-api-access-6h9pq") pod "9dbf7f31-122f-4e64-ae7c-5669badd697e" (UID: "9dbf7f31-122f-4e64-ae7c-5669badd697e"). InnerVolumeSpecName "kube-api-access-6h9pq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.865271 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6h9pq\" (UniqueName: \"kubernetes.io/projected/9dbf7f31-122f-4e64-ae7c-5669badd697e-kube-api-access-6h9pq\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.968921 4925 generic.go:334] "Generic (PLEG): container finished" podID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerID="346f72749d51a646b3de207c9d05e310bb1b0e14acd1813725fe77eb29e75e1b" exitCode=0 Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.969039 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"57345c8f-262f-4a3d-812d-3e8c465a8216","Type":"ContainerDied","Data":"346f72749d51a646b3de207c9d05e310bb1b0e14acd1813725fe77eb29e75e1b"} Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.972543 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" event={"ID":"9dbf7f31-122f-4e64-ae7c-5669badd697e","Type":"ContainerDied","Data":"2344a0f680535f44e33df8dd5565215ebce2e596e9dc5524e1df3a5377129153"} Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.972590 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2344a0f680535f44e33df8dd5565215ebce2e596e9dc5524e1df3a5377129153" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.972619 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinderf1be-account-delete-wzlrr" Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.979533 4925 generic.go:334] "Generic (PLEG): container finished" podID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerID="42b9bb1217ad33dfc895a34e7e1a745d8ab928a53f80c62aa34112aedbd0cba8" exitCode=0 Jan 21 11:34:38 crc kubenswrapper[4925]: I0121 11:34:38.979599 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"bce0e839-af46-4bcc-a21a-91968ec1ace7","Type":"ContainerDied","Data":"42b9bb1217ad33dfc895a34e7e1a745d8ab928a53f80c62aa34112aedbd0cba8"} Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.374637 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.383018 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.383613 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="ceilometer-central-agent" containerID="cri-o://1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562" gracePeriod=30 Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.384538 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="proxy-httpd" containerID="cri-o://3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1" gracePeriod=30 Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.384955 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" 
podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="sg-core" containerID="cri-o://a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc" gracePeriod=30 Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.385208 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="ceilometer-notification-agent" containerID="cri-o://791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24" gracePeriod=30 Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.406920 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-db-create-4fv8w"] Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.460614 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-db-create-4fv8w"] Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.475365 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.198:3000/\": EOF" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.488379 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinderf1be-account-delete-wzlrr"] Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.539261 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.560058 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c98b6c2-3cd8-4c48-8142-de9d6e11be9c" path="/var/lib/kubelet/pods/4c98b6c2-3cd8-4c48-8142-de9d6e11be9c/volumes" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.564283 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-f1be-account-create-update-hffks"] Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.568868 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinderf1be-account-delete-wzlrr"] Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.577296 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-f1be-account-create-update-hffks"] Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.688740 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.689204 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-scripts\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.689535 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data-custom\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.689777 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/57345c8f-262f-4a3d-812d-3e8c465a8216-logs\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.689959 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hm2x\" (UniqueName: \"kubernetes.io/projected/57345c8f-262f-4a3d-812d-3e8c465a8216-kube-api-access-9hm2x\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.690076 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-internal-tls-certs\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.690210 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-cert-memcached-mtls\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.690428 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/57345c8f-262f-4a3d-812d-3e8c465a8216-etc-machine-id\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.690625 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-combined-ca-bundle\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.690854 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-public-tls-certs\") pod \"57345c8f-262f-4a3d-812d-3e8c465a8216\" (UID: \"57345c8f-262f-4a3d-812d-3e8c465a8216\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.691387 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/57345c8f-262f-4a3d-812d-3e8c465a8216-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.691767 4925 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/57345c8f-262f-4a3d-812d-3e8c465a8216-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.694984 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57345c8f-262f-4a3d-812d-3e8c465a8216-logs" (OuterVolumeSpecName: "logs") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.696543 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.700756 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-scripts" (OuterVolumeSpecName: "scripts") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.701641 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57345c8f-262f-4a3d-812d-3e8c465a8216-kube-api-access-9hm2x" (OuterVolumeSpecName: "kube-api-access-9hm2x") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "kube-api-access-9hm2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.712466 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.728495 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.792666 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-dev\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.792792 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-lib-cinder\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.792826 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-combined-ca-bundle\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.792893 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data-custom\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.792895 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-lib-cinder" (OuterVolumeSpecName: "var-lib-cinder") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "var-lib-cinder". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.792942 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-run\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793001 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-cinder\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793027 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-sys\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793055 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-lib-modules\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793060 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-run" (OuterVolumeSpecName: "run") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793086 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-scripts\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793100 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-sys" (OuterVolumeSpecName: "sys") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793125 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-cinder" (OuterVolumeSpecName: "var-locks-cinder") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "var-locks-cinder". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793127 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793220 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-nvme\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793278 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d82nl\" (UniqueName: \"kubernetes.io/projected/bce0e839-af46-4bcc-a21a-91968ec1ace7-kube-api-access-d82nl\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793295 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-brick\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793332 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-iscsi\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.792855 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-dev" (OuterVolumeSpecName: "dev") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "dev". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793432 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-cert-memcached-mtls\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.793592 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-machine-id\") pod \"bce0e839-af46-4bcc-a21a-91968ec1ace7\" (UID: \"bce0e839-af46-4bcc-a21a-91968ec1ace7\") " Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794177 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794197 4925 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-dev\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794234 4925 reconciler_common.go:293] "Volume detached for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-lib-cinder\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794245 4925 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-run\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794255 4925 reconciler_common.go:293] "Volume detached for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-cinder\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794265 4925 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-sys\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794275 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794285 4925 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794298 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57345c8f-262f-4a3d-812d-3e8c465a8216-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794310 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hm2x\" (UniqueName: \"kubernetes.io/projected/57345c8f-262f-4a3d-812d-3e8c465a8216-kube-api-access-9hm2x\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794349 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.794384 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.795605 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.795690 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.795713 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.796108 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.801224 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.801420 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.807771 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-scripts" (OuterVolumeSpecName: "scripts") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.809660 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bce0e839-af46-4bcc-a21a-91968ec1ace7-kube-api-access-d82nl" (OuterVolumeSpecName: "kube-api-access-d82nl") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "kube-api-access-d82nl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:39 crc kubenswrapper[4925]: I0121 11:34:39.810533 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data" (OuterVolumeSpecName: "config-data") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049148 4925 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049190 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049200 4925 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049212 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049222 4925 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049231 4925 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049239 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d82nl\" (UniqueName: \"kubernetes.io/projected/bce0e839-af46-4bcc-a21a-91968ec1ace7-kube-api-access-d82nl\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049251 4925 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049258 4925 
reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049281 4925 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce0e839-af46-4bcc-a21a-91968ec1ace7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049291 4925 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.049667 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "57345c8f-262f-4a3d-812d-3e8c465a8216" (UID: "57345c8f-262f-4a3d-812d-3e8c465a8216"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.134885 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-backup-0" event={"ID":"bce0e839-af46-4bcc-a21a-91968ec1ace7","Type":"ContainerDied","Data":"4f21d7f74c3d1a9dd086b832223663c60c64ca87fcae44420fd84439391714d5"} Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.135331 4925 scope.go:117] "RemoveContainer" containerID="b27796d0dfc73426be7c8bdc5292b9d16905a6991ae8c4727f6037a52c0fe090" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.135581 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/cinder-backup-0" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.140682 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.160712 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/57345c8f-262f-4a3d-812d-3e8c465a8216-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.160739 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.293912 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-api-0" event={"ID":"57345c8f-262f-4a3d-812d-3e8c465a8216","Type":"ContainerDied","Data":"5b3162df7d56f37e0f88f31ccdb6d41d7e23c32aaaa0430d9ec252bd8ad3a5ae"} Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.294045 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-api-0" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.294812 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data" (OuterVolumeSpecName: "config-data") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.295968 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.350383 4925 generic.go:334] "Generic (PLEG): container finished" podID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerID="a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc" exitCode=2 Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.350525 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerDied","Data":"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc"} Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.378685 4925 generic.go:334] "Generic (PLEG): container finished" podID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerID="1847043b20c9cf78c82826a612b2fd1617792991fdade6bd26cd838a9a78b6ac" exitCode=0 Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.378740 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"89909e4b-c27f-4cb5-817c-1c3d789b07d7","Type":"ContainerDied","Data":"1847043b20c9cf78c82826a612b2fd1617792991fdade6bd26cd838a9a78b6ac"} Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.438664 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "bce0e839-af46-4bcc-a21a-91968ec1ace7" (UID: "bce0e839-af46-4bcc-a21a-91968ec1ace7"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.660421 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/bce0e839-af46-4bcc-a21a-91968ec1ace7-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.679526 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.700665 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.709185 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-api-0"] Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.726586 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.727049 4925 scope.go:117] "RemoveContainer" containerID="42b9bb1217ad33dfc895a34e7e1a745d8ab928a53f80c62aa34112aedbd0cba8" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.761692 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bx8vm\" (UniqueName: \"kubernetes.io/projected/89909e4b-c27f-4cb5-817c-1c3d789b07d7-kube-api-access-bx8vm\") pod \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.761767 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-combined-ca-bundle\") pod \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.761800 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-cert-memcached-mtls\") pod \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.761839 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data-custom\") pod \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.761955 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-scripts\") pod \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.761980 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/89909e4b-c27f-4cb5-817c-1c3d789b07d7-etc-machine-id\") pod \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.762007 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data\") pod \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\" (UID: \"89909e4b-c27f-4cb5-817c-1c3d789b07d7\") " Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.770009 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89909e4b-c27f-4cb5-817c-1c3d789b07d7-kube-api-access-bx8vm" (OuterVolumeSpecName: 
"kube-api-access-bx8vm") pod "89909e4b-c27f-4cb5-817c-1c3d789b07d7" (UID: "89909e4b-c27f-4cb5-817c-1c3d789b07d7"). InnerVolumeSpecName "kube-api-access-bx8vm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.770086 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/89909e4b-c27f-4cb5-817c-1c3d789b07d7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "89909e4b-c27f-4cb5-817c-1c3d789b07d7" (UID: "89909e4b-c27f-4cb5-817c-1c3d789b07d7"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.777589 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "89909e4b-c27f-4cb5-817c-1c3d789b07d7" (UID: "89909e4b-c27f-4cb5-817c-1c3d789b07d7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.778599 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-scripts" (OuterVolumeSpecName: "scripts") pod "89909e4b-c27f-4cb5-817c-1c3d789b07d7" (UID: "89909e4b-c27f-4cb5-817c-1c3d789b07d7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.785096 4925 scope.go:117] "RemoveContainer" containerID="346f72749d51a646b3de207c9d05e310bb1b0e14acd1813725fe77eb29e75e1b" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.824482 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.832604 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-backup-0"] Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.849029 4925 scope.go:117] "RemoveContainer" containerID="c8a815bebc208c2f565ee345818ea5743202f1e3a5387978048ba369bb3dec61" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.864513 4925 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.864560 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.864587 4925 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/89909e4b-c27f-4cb5-817c-1c3d789b07d7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.864600 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bx8vm\" (UniqueName: \"kubernetes.io/projected/89909e4b-c27f-4cb5-817c-1c3d789b07d7-kube-api-access-bx8vm\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.865516 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod 
"89909e4b-c27f-4cb5-817c-1c3d789b07d7" (UID: "89909e4b-c27f-4cb5-817c-1c3d789b07d7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.926706 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data" (OuterVolumeSpecName: "config-data") pod "89909e4b-c27f-4cb5-817c-1c3d789b07d7" (UID: "89909e4b-c27f-4cb5-817c-1c3d789b07d7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.966837 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:40 crc kubenswrapper[4925]: I0121 11:34:40.966889 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.004622 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "89909e4b-c27f-4cb5-817c-1c3d789b07d7" (UID: "89909e4b-c27f-4cb5-817c-1c3d789b07d7"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.051086 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-79xx6"] Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.061260 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/keystone-bootstrap-79xx6"] Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.067989 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/89909e4b-c27f-4cb5-817c-1c3d789b07d7-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.137668 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.168807 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-run-httpd\") pod \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.168954 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-sg-core-conf-yaml\") pod \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.169047 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-ceilometer-tls-certs\") pod \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.169089 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-scripts\") pod \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.169109 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-log-httpd\") pod \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.169141 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6x4n\" (UniqueName: \"kubernetes.io/projected/f296c8cc-70c9-4e34-80ef-1741d76e96ee-kube-api-access-q6x4n\") pod \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.169181 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-config-data\") pod \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.169202 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-combined-ca-bundle\") pod \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\" (UID: \"f296c8cc-70c9-4e34-80ef-1741d76e96ee\") " Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.170868 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f296c8cc-70c9-4e34-80ef-1741d76e96ee" (UID: "f296c8cc-70c9-4e34-80ef-1741d76e96ee"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.171144 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f296c8cc-70c9-4e34-80ef-1741d76e96ee" (UID: "f296c8cc-70c9-4e34-80ef-1741d76e96ee"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.187024 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f296c8cc-70c9-4e34-80ef-1741d76e96ee-kube-api-access-q6x4n" (OuterVolumeSpecName: "kube-api-access-q6x4n") pod "f296c8cc-70c9-4e34-80ef-1741d76e96ee" (UID: "f296c8cc-70c9-4e34-80ef-1741d76e96ee"). InnerVolumeSpecName "kube-api-access-q6x4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.188649 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-scripts" (OuterVolumeSpecName: "scripts") pod "f296c8cc-70c9-4e34-80ef-1741d76e96ee" (UID: "f296c8cc-70c9-4e34-80ef-1741d76e96ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.249440 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f296c8cc-70c9-4e34-80ef-1741d76e96ee" (UID: "f296c8cc-70c9-4e34-80ef-1741d76e96ee"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.258267 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f296c8cc-70c9-4e34-80ef-1741d76e96ee" (UID: "f296c8cc-70c9-4e34-80ef-1741d76e96ee"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.271189 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.271516 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.271590 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.271709 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6x4n\" (UniqueName: \"kubernetes.io/projected/f296c8cc-70c9-4e34-80ef-1741d76e96ee-kube-api-access-q6x4n\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.271797 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f296c8cc-70c9-4e34-80ef-1741d76e96ee-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.271857 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.299718 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f296c8cc-70c9-4e34-80ef-1741d76e96ee" (UID: "f296c8cc-70c9-4e34-80ef-1741d76e96ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.341657 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-config-data" (OuterVolumeSpecName: "config-data") pod "f296c8cc-70c9-4e34-80ef-1741d76e96ee" (UID: "f296c8cc-70c9-4e34-80ef-1741d76e96ee"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.373584 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.373923 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f296c8cc-70c9-4e34-80ef-1741d76e96ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395020 4925 generic.go:334] "Generic (PLEG): container finished" podID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerID="3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1" exitCode=0 Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395339 4925 generic.go:334] "Generic (PLEG): container finished" podID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerID="791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24" exitCode=0 Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395514 4925 generic.go:334] "Generic (PLEG): container finished" podID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerID="1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562" exitCode=0 Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395102 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395105 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerDied","Data":"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1"} Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395888 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerDied","Data":"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24"} Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395909 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerDied","Data":"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562"} Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395921 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f296c8cc-70c9-4e34-80ef-1741d76e96ee","Type":"ContainerDied","Data":"5a0a170d9b17155c40e968ff7e68150786f34dcf5aea2de2e89b598d4275b0d0"} Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.395942 4925 scope.go:117] "RemoveContainer" containerID="3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.425462 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/cinder-scheduler-0" event={"ID":"89909e4b-c27f-4cb5-817c-1c3d789b07d7","Type":"ContainerDied","Data":"85d4acc8676179ffa1ada040da588556deacdfc0cee1f70f22796500b7b6fa52"} Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.425600 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/cinder-scheduler-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.425857 4925 scope.go:117] "RemoveContainer" containerID="a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.464289 4925 scope.go:117] "RemoveContainer" containerID="791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.466305 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.492938 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.510217 4925 scope.go:117] "RemoveContainer" containerID="1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.698754 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" path="/var/lib/kubelet/pods/57345c8f-262f-4a3d-812d-3e8c465a8216/volumes" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.702976 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dbf7f31-122f-4e64-ae7c-5669badd697e" path="/var/lib/kubelet/pods/9dbf7f31-122f-4e64-ae7c-5669badd697e/volumes" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.703784 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2164931-e4db-447d-9821-118982670bdc" path="/var/lib/kubelet/pods/b2164931-e4db-447d-9821-118982670bdc/volumes" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.704628 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" path="/var/lib/kubelet/pods/bce0e839-af46-4bcc-a21a-91968ec1ace7/volumes" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.706136 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531" path="/var/lib/kubelet/pods/dd111bcd-b42f-4a58-9e6c-b8ce7e4e7531/volumes" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.708848 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" path="/var/lib/kubelet/pods/f296c8cc-70c9-4e34-80ef-1741d76e96ee/volumes" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.726604 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.726671 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/cinder-scheduler-0"] Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.738472 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.739477 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="ceilometer-central-agent" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.739504 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="ceilometer-central-agent" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.739526 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="proxy-httpd" Jan 21 11:34:41 crc 
kubenswrapper[4925]: I0121 11:34:41.739536 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="proxy-httpd" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.739686 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerName="cinder-backup" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.739778 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerName="cinder-backup" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.739866 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerName="probe" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.739877 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerName="probe" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.739888 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dbf7f31-122f-4e64-ae7c-5669badd697e" containerName="mariadb-account-delete" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.739896 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dbf7f31-122f-4e64-ae7c-5669badd697e" containerName="mariadb-account-delete" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.739935 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerName="cinder-scheduler" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.739944 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerName="cinder-scheduler" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.739978 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerName="probe" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.739986 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerName="probe" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.740022 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api-log" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740031 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api-log" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.740051 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="ceilometer-notification-agent" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740061 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="ceilometer-notification-agent" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.740086 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740093 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.740109 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="sg-core" Jan 21 11:34:41 crc 
kubenswrapper[4925]: I0121 11:34:41.740122 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="sg-core" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740027 4925 scope.go:117] "RemoveContainer" containerID="3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740908 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerName="probe" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740938 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="proxy-httpd" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740952 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerName="probe" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740965 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="ceilometer-central-agent" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740982 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dbf7f31-122f-4e64-ae7c-5669badd697e" containerName="mariadb-account-delete" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.740998 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce0e839-af46-4bcc-a21a-91968ec1ace7" containerName="cinder-backup" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.741011 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.741029 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="ceilometer-notification-agent" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.741036 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="57345c8f-262f-4a3d-812d-3e8c465a8216" containerName="cinder-api-log" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.741054 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f296c8cc-70c9-4e34-80ef-1741d76e96ee" containerName="sg-core" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.741067 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" containerName="cinder-scheduler" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.743972 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1\": container with ID starting with 3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1 not found: ID does not exist" containerID="3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.744031 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1"} err="failed to get container status \"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1\": rpc error: code = NotFound desc = could not find container \"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1\": container with ID starting with 
3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.744083 4925 scope.go:117] "RemoveContainer" containerID="a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.744515 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.745319 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc\": container with ID starting with a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc not found: ID does not exist" containerID="a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.745421 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc"} err="failed to get container status \"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc\": rpc error: code = NotFound desc = could not find container \"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc\": container with ID starting with a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.745514 4925 scope.go:117] "RemoveContainer" containerID="791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.764153 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.764755 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.764781 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24\": container with ID starting with 791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24 not found: ID does not exist" containerID="791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.764864 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24"} err="failed to get container status \"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24\": rpc error: code = NotFound desc = could not find container \"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24\": container with ID starting with 791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.764908 4925 scope.go:117] "RemoveContainer" containerID="1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.765071 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.766088 4925 reflector.go:368] Caches populated 
for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:34:41 crc kubenswrapper[4925]: E0121 11:34:41.768797 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562\": container with ID starting with 1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562 not found: ID does not exist" containerID="1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.768871 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562"} err="failed to get container status \"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562\": rpc error: code = NotFound desc = could not find container \"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562\": container with ID starting with 1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.768913 4925 scope.go:117] "RemoveContainer" containerID="3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.769633 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1"} err="failed to get container status \"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1\": rpc error: code = NotFound desc = could not find container \"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1\": container with ID starting with 3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.769670 4925 scope.go:117] "RemoveContainer" containerID="a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.770144 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc"} err="failed to get container status \"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc\": rpc error: code = NotFound desc = could not find container \"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc\": container with ID starting with a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.770178 4925 scope.go:117] "RemoveContainer" containerID="791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.771082 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24"} err="failed to get container status \"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24\": rpc error: code = NotFound desc = could not find container \"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24\": container with ID starting with 791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.771144 4925 scope.go:117] "RemoveContainer" 
containerID="1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.771709 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562"} err="failed to get container status \"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562\": rpc error: code = NotFound desc = could not find container \"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562\": container with ID starting with 1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.771734 4925 scope.go:117] "RemoveContainer" containerID="3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.772182 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1"} err="failed to get container status \"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1\": rpc error: code = NotFound desc = could not find container \"3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1\": container with ID starting with 3632b6631edd8dcc9c9e3454a29fb2f23dbda4c4e95ec5d7364300681cd01ae1 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.772211 4925 scope.go:117] "RemoveContainer" containerID="a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.774266 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc"} err="failed to get container status \"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc\": rpc error: code = NotFound desc = could not find container \"a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc\": container with ID starting with a8acf20bac4670ce10d5a9a81f3c3d79097c2ffba4789de36e869bac8fc82ebc not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.774301 4925 scope.go:117] "RemoveContainer" containerID="791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.774916 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24"} err="failed to get container status \"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24\": rpc error: code = NotFound desc = could not find container \"791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24\": container with ID starting with 791c8b9e2d18c1e2b2779e7d6fe972e8562eceafb2380ce5527df7aad5e76d24 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.774988 4925 scope.go:117] "RemoveContainer" containerID="1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.775586 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562"} err="failed to get container status \"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562\": rpc error: code = NotFound desc = could not find 
container \"1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562\": container with ID starting with 1c5562b5c957227d3f3fa0af71337adbc9fbaa29b1f9a9c4955a024ce951a562 not found: ID does not exist" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.775627 4925 scope.go:117] "RemoveContainer" containerID="c812d7e7df3fcaa9f2a28b162cae8fa85d2294e82b636e6597cb913c48e97b27" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.800739 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.800823 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.800872 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-run-httpd\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.800902 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.800944 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-scripts\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.800964 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-log-httpd\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.801000 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msmmg\" (UniqueName: \"kubernetes.io/projected/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-kube-api-access-msmmg\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.801020 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-config-data\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.825104 4925 
scope.go:117] "RemoveContainer" containerID="1847043b20c9cf78c82826a612b2fd1617792991fdade6bd26cd838a9a78b6ac" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.903188 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.903297 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.903374 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-run-httpd\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.903439 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.903493 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-scripts\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.903513 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-log-httpd\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.903553 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msmmg\" (UniqueName: \"kubernetes.io/projected/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-kube-api-access-msmmg\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.903576 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-config-data\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.904095 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-run-httpd\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.904158 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-log-httpd\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.909796 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-scripts\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.915267 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.915601 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.915956 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-config-data\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.920189 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.930787 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msmmg\" (UniqueName: \"kubernetes.io/projected/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-kube-api-access-msmmg\") pod \"ceilometer-0\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:41 crc kubenswrapper[4925]: I0121 11:34:41.971761 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:42 crc kubenswrapper[4925]: I0121 11:34:42.093583 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:42 crc kubenswrapper[4925]: I0121 11:34:42.639741 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.296126 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.656735 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89909e4b-c27f-4cb5-817c-1c3d789b07d7" path="/var/lib/kubelet/pods/89909e4b-c27f-4cb5-817c-1c3d789b07d7/volumes" Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.671718 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerStarted","Data":"a1b8c8ddbbc8fe4bf61d6a05da140bbc0063c0c0c45a2a937508dd6ab672d677"} Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.941812 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vzq5s"] Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.945112 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.961343 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vzq5s"] Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.962927 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlz7f\" (UniqueName: \"kubernetes.io/projected/bd8d2220-5a18-48db-9492-e92a8033c860-kube-api-access-nlz7f\") pod \"redhat-marketplace-vzq5s\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.963100 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-catalog-content\") pod \"redhat-marketplace-vzq5s\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:43 crc kubenswrapper[4925]: I0121 11:34:43.963157 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-utilities\") pod \"redhat-marketplace-vzq5s\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.065313 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlz7f\" (UniqueName: \"kubernetes.io/projected/bd8d2220-5a18-48db-9492-e92a8033c860-kube-api-access-nlz7f\") pod \"redhat-marketplace-vzq5s\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.065535 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-catalog-content\") pod \"redhat-marketplace-vzq5s\" (UID: 
\"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.065594 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-utilities\") pod \"redhat-marketplace-vzq5s\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.066451 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-utilities\") pod \"redhat-marketplace-vzq5s\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.066493 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-catalog-content\") pod \"redhat-marketplace-vzq5s\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.087057 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlz7f\" (UniqueName: \"kubernetes.io/projected/bd8d2220-5a18-48db-9492-e92a8033c860-kube-api-access-nlz7f\") pod \"redhat-marketplace-vzq5s\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.335253 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.605526 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:44 crc kubenswrapper[4925]: I0121 11:34:44.748701 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerStarted","Data":"0486ec5470f960b248dfe9771ef155ee6230ce9ca2994bc3e55ecfff96cab169"} Jan 21 11:34:45 crc kubenswrapper[4925]: I0121 11:34:45.112834 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vzq5s"] Jan 21 11:34:45 crc kubenswrapper[4925]: I0121 11:34:45.874095 4925 generic.go:334] "Generic (PLEG): container finished" podID="bd8d2220-5a18-48db-9492-e92a8033c860" containerID="589148156dc33205bcc63afccc1aa1e5f9cb136d8f6ceea1e1669cc3fdf8c5e8" exitCode=0 Jan 21 11:34:45 crc kubenswrapper[4925]: I0121 11:34:45.874653 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vzq5s" event={"ID":"bd8d2220-5a18-48db-9492-e92a8033c860","Type":"ContainerDied","Data":"589148156dc33205bcc63afccc1aa1e5f9cb136d8f6ceea1e1669cc3fdf8c5e8"} Jan 21 11:34:45 crc kubenswrapper[4925]: I0121 11:34:45.874695 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vzq5s" event={"ID":"bd8d2220-5a18-48db-9492-e92a8033c860","Type":"ContainerStarted","Data":"6126cbb8e41d5607264e657ab8986bb155b62dadca9af90aece7fdf4fa573d9f"} Jan 21 11:34:45 crc kubenswrapper[4925]: I0121 11:34:45.883847 4925 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerStarted","Data":"6156d77e752c61225e50e138bc025ff5a43a45248657085461f0b5b0356843c1"} Jan 21 11:34:45 crc kubenswrapper[4925]: I0121 11:34:45.897590 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:46 crc kubenswrapper[4925]: I0121 11:34:46.918726 4925 generic.go:334] "Generic (PLEG): container finished" podID="eb5761b2-6757-4aaf-8ecf-5de81cf90845" containerID="ca16500b733b5186298e744deaf79e1611286bdda6fd8e8c280cd500a527f15a" exitCode=0 Jan 21 11:34:46 crc kubenswrapper[4925]: I0121 11:34:46.918931 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"eb5761b2-6757-4aaf-8ecf-5de81cf90845","Type":"ContainerDied","Data":"ca16500b733b5186298e744deaf79e1611286bdda6fd8e8c280cd500a527f15a"} Jan 21 11:34:46 crc kubenswrapper[4925]: I0121 11:34:46.919293 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"eb5761b2-6757-4aaf-8ecf-5de81cf90845","Type":"ContainerDied","Data":"81700ca1086dd46af2104c3065d557ff46ef374e406ed0ae5bc5c864b4a6f004"} Jan 21 11:34:46 crc kubenswrapper[4925]: I0121 11:34:46.919313 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81700ca1086dd46af2104c3065d557ff46ef374e406ed0ae5bc5c864b4a6f004" Jan 21 11:34:46 crc kubenswrapper[4925]: I0121 11:34:46.925383 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerStarted","Data":"7d1dac6fce5084d483bbc177ef60f71ee934ffefb2e8330801faf82735c839eb"} Jan 21 11:34:46 crc kubenswrapper[4925]: I0121 11:34:46.932091 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.004917 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eb5761b2-6757-4aaf-8ecf-5de81cf90845-logs\") pod \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.004972 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-config-data\") pod \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.005023 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-combined-ca-bundle\") pod \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.005071 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6wst\" (UniqueName: \"kubernetes.io/projected/eb5761b2-6757-4aaf-8ecf-5de81cf90845-kube-api-access-f6wst\") pod \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.005103 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-cert-memcached-mtls\") pod \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.005162 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-custom-prometheus-ca\") pod \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\" (UID: \"eb5761b2-6757-4aaf-8ecf-5de81cf90845\") " Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.007403 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb5761b2-6757-4aaf-8ecf-5de81cf90845-logs" (OuterVolumeSpecName: "logs") pod "eb5761b2-6757-4aaf-8ecf-5de81cf90845" (UID: "eb5761b2-6757-4aaf-8ecf-5de81cf90845"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.020757 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb5761b2-6757-4aaf-8ecf-5de81cf90845-kube-api-access-f6wst" (OuterVolumeSpecName: "kube-api-access-f6wst") pod "eb5761b2-6757-4aaf-8ecf-5de81cf90845" (UID: "eb5761b2-6757-4aaf-8ecf-5de81cf90845"). InnerVolumeSpecName "kube-api-access-f6wst". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.304572 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eb5761b2-6757-4aaf-8ecf-5de81cf90845-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.304635 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6wst\" (UniqueName: \"kubernetes.io/projected/eb5761b2-6757-4aaf-8ecf-5de81cf90845-kube-api-access-f6wst\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.317142 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "eb5761b2-6757-4aaf-8ecf-5de81cf90845" (UID: "eb5761b2-6757-4aaf-8ecf-5de81cf90845"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.352606 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-config-data" (OuterVolumeSpecName: "config-data") pod "eb5761b2-6757-4aaf-8ecf-5de81cf90845" (UID: "eb5761b2-6757-4aaf-8ecf-5de81cf90845"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.355999 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb5761b2-6757-4aaf-8ecf-5de81cf90845" (UID: "eb5761b2-6757-4aaf-8ecf-5de81cf90845"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.406437 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.406471 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.406502 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.410794 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_eb5761b2-6757-4aaf-8ecf-5de81cf90845/watcher-decision-engine/0.log" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.420488 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "eb5761b2-6757-4aaf-8ecf-5de81cf90845" (UID: "eb5761b2-6757-4aaf-8ecf-5de81cf90845"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.507931 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/eb5761b2-6757-4aaf-8ecf-5de81cf90845-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.935152 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.969037 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.981418 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.989929 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:47 crc kubenswrapper[4925]: E0121 11:34:47.990332 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb5761b2-6757-4aaf-8ecf-5de81cf90845" containerName="watcher-decision-engine" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.990351 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb5761b2-6757-4aaf-8ecf-5de81cf90845" containerName="watcher-decision-engine" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.990566 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb5761b2-6757-4aaf-8ecf-5de81cf90845" containerName="watcher-decision-engine" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.991245 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:47 crc kubenswrapper[4925]: I0121 11:34:47.993599 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.018135 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a69deccc-7052-4059-b4fe-fef0f8e35b0c-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.018191 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.018238 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44mk4\" (UniqueName: \"kubernetes.io/projected/a69deccc-7052-4059-b4fe-fef0f8e35b0c-kube-api-access-44mk4\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.018301 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.018327 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.018674 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.020793 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.119940 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a69deccc-7052-4059-b4fe-fef0f8e35b0c-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.120292 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.120323 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44mk4\" (UniqueName: \"kubernetes.io/projected/a69deccc-7052-4059-b4fe-fef0f8e35b0c-kube-api-access-44mk4\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.120364 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.120385 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.120461 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.120551 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a69deccc-7052-4059-b4fe-fef0f8e35b0c-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.125209 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.125245 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.125765 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 
11:34:48.140222 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44mk4\" (UniqueName: \"kubernetes.io/projected/a69deccc-7052-4059-b4fe-fef0f8e35b0c-kube-api-access-44mk4\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.140378 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.442584 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:48 crc kubenswrapper[4925]: I0121 11:34:48.956593 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:34:49 crc kubenswrapper[4925]: I0121 11:34:49.690431 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb5761b2-6757-4aaf-8ecf-5de81cf90845" path="/var/lib/kubelet/pods/eb5761b2-6757-4aaf-8ecf-5de81cf90845/volumes" Jan 21 11:34:49 crc kubenswrapper[4925]: I0121 11:34:49.961153 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"a69deccc-7052-4059-b4fe-fef0f8e35b0c","Type":"ContainerStarted","Data":"b659df86823bcdcb2cad2ec467bb1167e88c94a0094896e9de3d0e032d678d23"} Jan 21 11:34:54 crc kubenswrapper[4925]: I0121 11:34:54.004582 4925 generic.go:334] "Generic (PLEG): container finished" podID="bd8d2220-5a18-48db-9492-e92a8033c860" containerID="675d805878270f86732a88bd5a2d37d546821329662ef0efb209fdf85bcdb226" exitCode=0 Jan 21 11:34:54 crc kubenswrapper[4925]: I0121 11:34:54.005020 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vzq5s" event={"ID":"bd8d2220-5a18-48db-9492-e92a8033c860","Type":"ContainerDied","Data":"675d805878270f86732a88bd5a2d37d546821329662ef0efb209fdf85bcdb226"} Jan 21 11:34:54 crc kubenswrapper[4925]: I0121 11:34:54.009685 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"a69deccc-7052-4059-b4fe-fef0f8e35b0c","Type":"ContainerStarted","Data":"041c6440e774981b1fe67a67d852feaf1edeff8507de50d02ecd212ae10a3ee3"} Jan 21 11:34:54 crc kubenswrapper[4925]: I0121 11:34:54.019436 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerStarted","Data":"b44ffd36c818a8f783858141a417432505a77f51a43a220fecf55e505fc3ccf0"} Jan 21 11:34:54 crc kubenswrapper[4925]: I0121 11:34:54.019650 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:34:54 crc kubenswrapper[4925]: I0121 11:34:54.087010 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=3.055986943 podStartE2EDuration="13.086991054s" podCreationTimestamp="2026-01-21 11:34:41 +0000 UTC" firstStartedPulling="2026-01-21 11:34:42.64675286 +0000 UTC m=+2374.250644794" lastFinishedPulling="2026-01-21 11:34:52.677756971 +0000 
UTC m=+2384.281648905" observedRunningTime="2026-01-21 11:34:54.080857921 +0000 UTC m=+2385.684749855" watchObservedRunningTime="2026-01-21 11:34:54.086991054 +0000 UTC m=+2385.690882988" Jan 21 11:34:54 crc kubenswrapper[4925]: I0121 11:34:54.136853 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=7.136826186 podStartE2EDuration="7.136826186s" podCreationTimestamp="2026-01-21 11:34:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:34:54.128549935 +0000 UTC m=+2385.732441869" watchObservedRunningTime="2026-01-21 11:34:54.136826186 +0000 UTC m=+2385.740718130" Jan 21 11:34:54 crc kubenswrapper[4925]: I0121 11:34:54.716097 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:34:56 crc kubenswrapper[4925]: I0121 11:34:56.003737 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:34:57 crc kubenswrapper[4925]: I0121 11:34:57.256272 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:34:58 crc kubenswrapper[4925]: I0121 11:34:58.444157 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:58 crc kubenswrapper[4925]: I0121 11:34:58.477825 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:58 crc kubenswrapper[4925]: I0121 11:34:58.524212 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:34:59 crc kubenswrapper[4925]: I0121 11:34:59.075531 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vzq5s" event={"ID":"bd8d2220-5a18-48db-9492-e92a8033c860","Type":"ContainerStarted","Data":"f58fe1de40b4418913300b75185750283ca91971005665c64754e92b4197c09b"} Jan 21 11:34:59 crc kubenswrapper[4925]: I0121 11:34:59.075620 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:59 crc kubenswrapper[4925]: I0121 11:34:59.105880 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:34:59 crc kubenswrapper[4925]: I0121 11:34:59.938019 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:35:00 crc kubenswrapper[4925]: I0121 11:35:00.115071 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vzq5s" podStartSLOduration=5.355539749 podStartE2EDuration="17.11503724s" podCreationTimestamp="2026-01-21 11:34:43 +0000 UTC" firstStartedPulling="2026-01-21 11:34:45.879669315 +0000 UTC m=+2377.483561249" lastFinishedPulling="2026-01-21 
11:34:57.639166806 +0000 UTC m=+2389.243058740" observedRunningTime="2026-01-21 11:35:00.110447766 +0000 UTC m=+2391.714339720" watchObservedRunningTime="2026-01-21 11:35:00.11503724 +0000 UTC m=+2391.718929174" Jan 21 11:35:02 crc kubenswrapper[4925]: I0121 11:35:01.183425 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:35:02 crc kubenswrapper[4925]: I0121 11:35:02.548274 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:35:03 crc kubenswrapper[4925]: I0121 11:35:03.805371 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:35:04 crc kubenswrapper[4925]: I0121 11:35:04.402226 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:35:04 crc kubenswrapper[4925]: I0121 11:35:04.443671 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:35:04 crc kubenswrapper[4925]: I0121 11:35:04.499888 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.042486 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_watcher-kuttl-decision-engine-0_a69deccc-7052-4059-b4fe-fef0f8e35b0c/watcher-decision-engine/0.log" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.191706 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-497rl"] Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.231311 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-497rl"] Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.283493 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher50a1-account-delete-qkq9x"] Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.285763 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.313965 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher50a1-account-delete-qkq9x"] Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.337157 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.337750 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="9346c552-eed6-447d-aa74-db8a61311a9b" containerName="watcher-applier" containerID="cri-o://bc4daa0c2bfb0deb5b6b2ffa8f5dd788f641851c6ce11ebfe89576a7d34d79e1" gracePeriod=30 Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.340742 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11206d56-c989-474c-9b79-00449dacd0d7-operator-scripts\") pod \"watcher50a1-account-delete-qkq9x\" (UID: \"11206d56-c989-474c-9b79-00449dacd0d7\") " pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.341292 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttw7q\" (UniqueName: \"kubernetes.io/projected/11206d56-c989-474c-9b79-00449dacd0d7-kube-api-access-ttw7q\") pod \"watcher50a1-account-delete-qkq9x\" (UID: \"11206d56-c989-474c-9b79-00449dacd0d7\") " pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.409072 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.409425 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="a69deccc-7052-4059-b4fe-fef0f8e35b0c" containerName="watcher-decision-engine" containerID="cri-o://041c6440e774981b1fe67a67d852feaf1edeff8507de50d02ecd212ae10a3ee3" gracePeriod=30 Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.442767 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttw7q\" (UniqueName: \"kubernetes.io/projected/11206d56-c989-474c-9b79-00449dacd0d7-kube-api-access-ttw7q\") pod \"watcher50a1-account-delete-qkq9x\" (UID: \"11206d56-c989-474c-9b79-00449dacd0d7\") " pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.442885 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11206d56-c989-474c-9b79-00449dacd0d7-operator-scripts\") pod \"watcher50a1-account-delete-qkq9x\" (UID: \"11206d56-c989-474c-9b79-00449dacd0d7\") " pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.443912 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11206d56-c989-474c-9b79-00449dacd0d7-operator-scripts\") pod \"watcher50a1-account-delete-qkq9x\" (UID: \"11206d56-c989-474c-9b79-00449dacd0d7\") " pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.443986 4925 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.444272 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-kuttl-api-log" containerID="cri-o://e003e47f4e073c6511a2c5833d0639ee8690b5bdb3e27339f49ffc842d7e7e88" gracePeriod=30 Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.444419 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-api" containerID="cri-o://e7b7f1d24deaf19f6642fd47c1c4d5ce271a668f758b8d0664a02194010895f4" gracePeriod=30 Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.713080 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttw7q\" (UniqueName: \"kubernetes.io/projected/11206d56-c989-474c-9b79-00449dacd0d7-kube-api-access-ttw7q\") pod \"watcher50a1-account-delete-qkq9x\" (UID: \"11206d56-c989-474c-9b79-00449dacd0d7\") " pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.718910 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0742dce7-0bc4-4c67-a1fa-abfa1921d449" path="/var/lib/kubelet/pods/0742dce7-0bc4-4c67-a1fa-abfa1921d449/volumes" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.817042 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:35:05 crc kubenswrapper[4925]: I0121 11:35:05.915891 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:06 crc kubenswrapper[4925]: I0121 11:35:06.750803 4925 generic.go:334] "Generic (PLEG): container finished" podID="e8ab333d-b735-413d-a903-c8a215353127" containerID="e003e47f4e073c6511a2c5833d0639ee8690b5bdb3e27339f49ffc842d7e7e88" exitCode=143 Jan 21 11:35:06 crc kubenswrapper[4925]: I0121 11:35:06.750942 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"e8ab333d-b735-413d-a903-c8a215353127","Type":"ContainerDied","Data":"e003e47f4e073c6511a2c5833d0639ee8690b5bdb3e27339f49ffc842d7e7e88"} Jan 21 11:35:06 crc kubenswrapper[4925]: I0121 11:35:06.938651 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher50a1-account-delete-qkq9x"] Jan 21 11:35:07 crc kubenswrapper[4925]: E0121 11:35:07.741784 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="bc4daa0c2bfb0deb5b6b2ffa8f5dd788f641851c6ce11ebfe89576a7d34d79e1" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:35:07 crc kubenswrapper[4925]: E0121 11:35:07.743912 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="bc4daa0c2bfb0deb5b6b2ffa8f5dd788f641851c6ce11ebfe89576a7d34d79e1" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:35:07 crc kubenswrapper[4925]: E0121 11:35:07.747932 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="bc4daa0c2bfb0deb5b6b2ffa8f5dd788f641851c6ce11ebfe89576a7d34d79e1" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:35:07 crc kubenswrapper[4925]: E0121 11:35:07.748055 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="9346c552-eed6-447d-aa74-db8a61311a9b" containerName="watcher-applier" Jan 21 11:35:07 crc kubenswrapper[4925]: I0121 11:35:07.763313 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" event={"ID":"11206d56-c989-474c-9b79-00449dacd0d7","Type":"ContainerStarted","Data":"44a01aeb3a8e4af907e3abea96d300314731378a6e35f4267b229d1af5abb1bc"} Jan 21 11:35:07 crc kubenswrapper[4925]: I0121 11:35:07.763380 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" event={"ID":"11206d56-c989-474c-9b79-00449dacd0d7","Type":"ContainerStarted","Data":"8f3a508402a5c042c8276f720742c3894086d53d0ea54ff63985deb316921ffd"} Jan 21 11:35:07 crc kubenswrapper[4925]: I0121 11:35:07.821548 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.183:9322/\": read tcp 10.217.0.2:43314->10.217.0.183:9322: read: connection reset by peer" Jan 21 11:35:07 crc kubenswrapper[4925]: I0121 11:35:07.821602 4925 prober.go:107] "Probe 
failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.183:9322/\": read tcp 10.217.0.2:43328->10.217.0.183:9322: read: connection reset by peer" Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.202955 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" podStartSLOduration=3.202892463 podStartE2EDuration="3.202892463s" podCreationTimestamp="2026-01-21 11:35:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:35:07.78478612 +0000 UTC m=+2399.388678074" watchObservedRunningTime="2026-01-21 11:35:08.202892463 +0000 UTC m=+2399.806784397" Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.205089 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vzq5s"] Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.205604 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vzq5s" podUID="bd8d2220-5a18-48db-9492-e92a8033c860" containerName="registry-server" containerID="cri-o://f58fe1de40b4418913300b75185750283ca91971005665c64754e92b4197c09b" gracePeriod=2 Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.789239 4925 generic.go:334] "Generic (PLEG): container finished" podID="11206d56-c989-474c-9b79-00449dacd0d7" containerID="44a01aeb3a8e4af907e3abea96d300314731378a6e35f4267b229d1af5abb1bc" exitCode=0 Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.789539 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" event={"ID":"11206d56-c989-474c-9b79-00449dacd0d7","Type":"ContainerDied","Data":"44a01aeb3a8e4af907e3abea96d300314731378a6e35f4267b229d1af5abb1bc"} Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.802376 4925 generic.go:334] "Generic (PLEG): container finished" podID="e8ab333d-b735-413d-a903-c8a215353127" containerID="e7b7f1d24deaf19f6642fd47c1c4d5ce271a668f758b8d0664a02194010895f4" exitCode=0 Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.802459 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"e8ab333d-b735-413d-a903-c8a215353127","Type":"ContainerDied","Data":"e7b7f1d24deaf19f6642fd47c1c4d5ce271a668f758b8d0664a02194010895f4"} Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.815532 4925 generic.go:334] "Generic (PLEG): container finished" podID="a69deccc-7052-4059-b4fe-fef0f8e35b0c" containerID="041c6440e774981b1fe67a67d852feaf1edeff8507de50d02ecd212ae10a3ee3" exitCode=0 Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.815688 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"a69deccc-7052-4059-b4fe-fef0f8e35b0c","Type":"ContainerDied","Data":"041c6440e774981b1fe67a67d852feaf1edeff8507de50d02ecd212ae10a3ee3"} Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.820588 4925 generic.go:334] "Generic (PLEG): container finished" podID="9346c552-eed6-447d-aa74-db8a61311a9b" containerID="bc4daa0c2bfb0deb5b6b2ffa8f5dd788f641851c6ce11ebfe89576a7d34d79e1" exitCode=0 Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.820690 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"9346c552-eed6-447d-aa74-db8a61311a9b","Type":"ContainerDied","Data":"bc4daa0c2bfb0deb5b6b2ffa8f5dd788f641851c6ce11ebfe89576a7d34d79e1"} Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.838746 4925 generic.go:334] "Generic (PLEG): container finished" podID="bd8d2220-5a18-48db-9492-e92a8033c860" containerID="f58fe1de40b4418913300b75185750283ca91971005665c64754e92b4197c09b" exitCode=0 Jan 21 11:35:08 crc kubenswrapper[4925]: I0121 11:35:08.838781 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vzq5s" event={"ID":"bd8d2220-5a18-48db-9492-e92a8033c860","Type":"ContainerDied","Data":"f58fe1de40b4418913300b75185750283ca91971005665c64754e92b4197c09b"} Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.318486 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.375792 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.508835 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a69deccc-7052-4059-b4fe-fef0f8e35b0c-logs\") pod \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.508959 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-combined-ca-bundle\") pod \"9346c552-eed6-447d-aa74-db8a61311a9b\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509028 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-config-data\") pod \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509074 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hfnz\" (UniqueName: \"kubernetes.io/projected/9346c552-eed6-447d-aa74-db8a61311a9b-kube-api-access-5hfnz\") pod \"9346c552-eed6-447d-aa74-db8a61311a9b\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509121 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-custom-prometheus-ca\") pod \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509188 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-cert-memcached-mtls\") pod \"9346c552-eed6-447d-aa74-db8a61311a9b\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509219 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-combined-ca-bundle\") pod \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509364 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44mk4\" (UniqueName: \"kubernetes.io/projected/a69deccc-7052-4059-b4fe-fef0f8e35b0c-kube-api-access-44mk4\") pod \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509435 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9346c552-eed6-447d-aa74-db8a61311a9b-logs\") pod \"9346c552-eed6-447d-aa74-db8a61311a9b\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509475 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-cert-memcached-mtls\") pod \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.509505 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-config-data\") pod \"9346c552-eed6-447d-aa74-db8a61311a9b\" (UID: \"9346c552-eed6-447d-aa74-db8a61311a9b\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.531676 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a69deccc-7052-4059-b4fe-fef0f8e35b0c-logs" (OuterVolumeSpecName: "logs") pod "a69deccc-7052-4059-b4fe-fef0f8e35b0c" (UID: "a69deccc-7052-4059-b4fe-fef0f8e35b0c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.544549 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a69deccc-7052-4059-b4fe-fef0f8e35b0c-kube-api-access-44mk4" (OuterVolumeSpecName: "kube-api-access-44mk4") pod "a69deccc-7052-4059-b4fe-fef0f8e35b0c" (UID: "a69deccc-7052-4059-b4fe-fef0f8e35b0c"). InnerVolumeSpecName "kube-api-access-44mk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.554992 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9346c552-eed6-447d-aa74-db8a61311a9b-logs" (OuterVolumeSpecName: "logs") pod "9346c552-eed6-447d-aa74-db8a61311a9b" (UID: "9346c552-eed6-447d-aa74-db8a61311a9b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.560386 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9346c552-eed6-447d-aa74-db8a61311a9b-kube-api-access-5hfnz" (OuterVolumeSpecName: "kube-api-access-5hfnz") pod "9346c552-eed6-447d-aa74-db8a61311a9b" (UID: "9346c552-eed6-447d-aa74-db8a61311a9b"). InnerVolumeSpecName "kube-api-access-5hfnz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.584633 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "a69deccc-7052-4059-b4fe-fef0f8e35b0c" (UID: "a69deccc-7052-4059-b4fe-fef0f8e35b0c"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.613645 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-config-data" (OuterVolumeSpecName: "config-data") pod "a69deccc-7052-4059-b4fe-fef0f8e35b0c" (UID: "a69deccc-7052-4059-b4fe-fef0f8e35b0c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.614514 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-config-data\") pod \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\" (UID: \"a69deccc-7052-4059-b4fe-fef0f8e35b0c\") " Jan 21 11:35:09 crc kubenswrapper[4925]: W0121 11:35:09.614739 4925 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/a69deccc-7052-4059-b4fe-fef0f8e35b0c/volumes/kubernetes.io~secret/config-data Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.614788 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-config-data" (OuterVolumeSpecName: "config-data") pod "a69deccc-7052-4059-b4fe-fef0f8e35b0c" (UID: "a69deccc-7052-4059-b4fe-fef0f8e35b0c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.615347 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9346c552-eed6-447d-aa74-db8a61311a9b" (UID: "9346c552-eed6-447d-aa74-db8a61311a9b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.615419 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44mk4\" (UniqueName: \"kubernetes.io/projected/a69deccc-7052-4059-b4fe-fef0f8e35b0c-kube-api-access-44mk4\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.615438 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9346c552-eed6-447d-aa74-db8a61311a9b-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.615455 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a69deccc-7052-4059-b4fe-fef0f8e35b0c-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.615466 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.615477 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hfnz\" (UniqueName: \"kubernetes.io/projected/9346c552-eed6-447d-aa74-db8a61311a9b-kube-api-access-5hfnz\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.615488 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.623418 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a69deccc-7052-4059-b4fe-fef0f8e35b0c" (UID: "a69deccc-7052-4059-b4fe-fef0f8e35b0c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.643284 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "a69deccc-7052-4059-b4fe-fef0f8e35b0c" (UID: "a69deccc-7052-4059-b4fe-fef0f8e35b0c"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.651856 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-config-data" (OuterVolumeSpecName: "config-data") pod "9346c552-eed6-447d-aa74-db8a61311a9b" (UID: "9346c552-eed6-447d-aa74-db8a61311a9b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.830322 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.830363 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a69deccc-7052-4059-b4fe-fef0f8e35b0c-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.830377 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.830388 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.835449 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "9346c552-eed6-447d-aa74-db8a61311a9b" (UID: "9346c552-eed6-447d-aa74-db8a61311a9b"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.858701 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"9346c552-eed6-447d-aa74-db8a61311a9b","Type":"ContainerDied","Data":"6481fb0f153001471a92d02eec09bf66605a11011a32fa7e48d1a1e3d6a9c5e8"} Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.858789 4925 scope.go:117] "RemoveContainer" containerID="bc4daa0c2bfb0deb5b6b2ffa8f5dd788f641851c6ce11ebfe89576a7d34d79e1" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.858970 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.863292 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vzq5s" event={"ID":"bd8d2220-5a18-48db-9492-e92a8033c860","Type":"ContainerDied","Data":"6126cbb8e41d5607264e657ab8986bb155b62dadca9af90aece7fdf4fa573d9f"} Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.863360 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6126cbb8e41d5607264e657ab8986bb155b62dadca9af90aece7fdf4fa573d9f" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.866122 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.875574 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"a69deccc-7052-4059-b4fe-fef0f8e35b0c","Type":"ContainerDied","Data":"b659df86823bcdcb2cad2ec467bb1167e88c94a0094896e9de3d0e032d678d23"} Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.882191 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.907804 4925 scope.go:117] "RemoveContainer" containerID="041c6440e774981b1fe67a67d852feaf1edeff8507de50d02ecd212ae10a3ee3" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.921810 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.930166 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.931899 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-utilities\") pod \"bd8d2220-5a18-48db-9492-e92a8033c860\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.932000 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-catalog-content\") pod \"bd8d2220-5a18-48db-9492-e92a8033c860\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.932125 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlz7f\" (UniqueName: \"kubernetes.io/projected/bd8d2220-5a18-48db-9492-e92a8033c860-kube-api-access-nlz7f\") pod \"bd8d2220-5a18-48db-9492-e92a8033c860\" (UID: \"bd8d2220-5a18-48db-9492-e92a8033c860\") " Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.932958 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/9346c552-eed6-447d-aa74-db8a61311a9b-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.935166 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-utilities" (OuterVolumeSpecName: "utilities") pod "bd8d2220-5a18-48db-9492-e92a8033c860" (UID: "bd8d2220-5a18-48db-9492-e92a8033c860"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.942601 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd8d2220-5a18-48db-9492-e92a8033c860-kube-api-access-nlz7f" (OuterVolumeSpecName: "kube-api-access-nlz7f") pod "bd8d2220-5a18-48db-9492-e92a8033c860" (UID: "bd8d2220-5a18-48db-9492-e92a8033c860"). InnerVolumeSpecName "kube-api-access-nlz7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.946878 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.957688 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.961578 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:35:09 crc kubenswrapper[4925]: I0121 11:35:09.976195 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd8d2220-5a18-48db-9492-e92a8033c860" (UID: "bd8d2220-5a18-48db-9492-e92a8033c860"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.033721 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8ab333d-b735-413d-a903-c8a215353127-logs\") pod \"e8ab333d-b735-413d-a903-c8a215353127\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.033787 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-config-data\") pod \"e8ab333d-b735-413d-a903-c8a215353127\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.033965 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-custom-prometheus-ca\") pod \"e8ab333d-b735-413d-a903-c8a215353127\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.034013 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2nkg\" (UniqueName: \"kubernetes.io/projected/e8ab333d-b735-413d-a903-c8a215353127-kube-api-access-v2nkg\") pod \"e8ab333d-b735-413d-a903-c8a215353127\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.034115 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-cert-memcached-mtls\") pod \"e8ab333d-b735-413d-a903-c8a215353127\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.034187 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-combined-ca-bundle\") pod \"e8ab333d-b735-413d-a903-c8a215353127\" (UID: \"e8ab333d-b735-413d-a903-c8a215353127\") " Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.034673 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlz7f\" (UniqueName: \"kubernetes.io/projected/bd8d2220-5a18-48db-9492-e92a8033c860-kube-api-access-nlz7f\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.034703 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.034723 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/bd8d2220-5a18-48db-9492-e92a8033c860-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.050171 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8ab333d-b735-413d-a903-c8a215353127-logs" (OuterVolumeSpecName: "logs") pod "e8ab333d-b735-413d-a903-c8a215353127" (UID: "e8ab333d-b735-413d-a903-c8a215353127"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.056615 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8ab333d-b735-413d-a903-c8a215353127-kube-api-access-v2nkg" (OuterVolumeSpecName: "kube-api-access-v2nkg") pod "e8ab333d-b735-413d-a903-c8a215353127" (UID: "e8ab333d-b735-413d-a903-c8a215353127"). InnerVolumeSpecName "kube-api-access-v2nkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.100740 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "e8ab333d-b735-413d-a903-c8a215353127" (UID: "e8ab333d-b735-413d-a903-c8a215353127"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.106871 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-config-data" (OuterVolumeSpecName: "config-data") pod "e8ab333d-b735-413d-a903-c8a215353127" (UID: "e8ab333d-b735-413d-a903-c8a215353127"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.119725 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8ab333d-b735-413d-a903-c8a215353127" (UID: "e8ab333d-b735-413d-a903-c8a215353127"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.136920 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8ab333d-b735-413d-a903-c8a215353127-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.137033 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.137084 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.137100 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2nkg\" (UniqueName: \"kubernetes.io/projected/e8ab333d-b735-413d-a903-c8a215353127-kube-api-access-v2nkg\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.137112 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.168670 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "e8ab333d-b735-413d-a903-c8a215353127" (UID: "e8ab333d-b735-413d-a903-c8a215353127"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.246652 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/e8ab333d-b735-413d-a903-c8a215353127-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.331794 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.450471 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11206d56-c989-474c-9b79-00449dacd0d7-operator-scripts\") pod \"11206d56-c989-474c-9b79-00449dacd0d7\" (UID: \"11206d56-c989-474c-9b79-00449dacd0d7\") " Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.450929 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttw7q\" (UniqueName: \"kubernetes.io/projected/11206d56-c989-474c-9b79-00449dacd0d7-kube-api-access-ttw7q\") pod \"11206d56-c989-474c-9b79-00449dacd0d7\" (UID: \"11206d56-c989-474c-9b79-00449dacd0d7\") " Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.451282 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11206d56-c989-474c-9b79-00449dacd0d7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11206d56-c989-474c-9b79-00449dacd0d7" (UID: "11206d56-c989-474c-9b79-00449dacd0d7"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.451696 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11206d56-c989-474c-9b79-00449dacd0d7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.459773 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11206d56-c989-474c-9b79-00449dacd0d7-kube-api-access-ttw7q" (OuterVolumeSpecName: "kube-api-access-ttw7q") pod "11206d56-c989-474c-9b79-00449dacd0d7" (UID: "11206d56-c989-474c-9b79-00449dacd0d7"). InnerVolumeSpecName "kube-api-access-ttw7q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.716478 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttw7q\" (UniqueName: \"kubernetes.io/projected/11206d56-c989-474c-9b79-00449dacd0d7-kube-api-access-ttw7q\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.888827 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" event={"ID":"11206d56-c989-474c-9b79-00449dacd0d7","Type":"ContainerDied","Data":"8f3a508402a5c042c8276f720742c3894086d53d0ea54ff63985deb316921ffd"} Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.888878 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher50a1-account-delete-qkq9x" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.888893 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f3a508402a5c042c8276f720742c3894086d53d0ea54ff63985deb316921ffd" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.891203 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"e8ab333d-b735-413d-a903-c8a215353127","Type":"ContainerDied","Data":"a3ce428660d973ac1efdfdb620ea84f4a0c1b5e86437f24d11ce4a1898fd8dfa"} Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.891209 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.891245 4925 scope.go:117] "RemoveContainer" containerID="e7b7f1d24deaf19f6642fd47c1c4d5ce271a668f758b8d0664a02194010895f4" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.896381 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vzq5s" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.935367 4925 scope.go:117] "RemoveContainer" containerID="e003e47f4e073c6511a2c5833d0639ee8690b5bdb3e27339f49ffc842d7e7e88" Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.940543 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.948026 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.962082 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vzq5s"] Jan 21 11:35:10 crc kubenswrapper[4925]: I0121 11:35:10.970161 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vzq5s"] Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.513769 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9346c552-eed6-447d-aa74-db8a61311a9b" path="/var/lib/kubelet/pods/9346c552-eed6-447d-aa74-db8a61311a9b/volumes" Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.514635 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a69deccc-7052-4059-b4fe-fef0f8e35b0c" path="/var/lib/kubelet/pods/a69deccc-7052-4059-b4fe-fef0f8e35b0c/volumes" Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.515356 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd8d2220-5a18-48db-9492-e92a8033c860" path="/var/lib/kubelet/pods/bd8d2220-5a18-48db-9492-e92a8033c860/volumes" Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.517804 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8ab333d-b735-413d-a903-c8a215353127" path="/var/lib/kubelet/pods/e8ab333d-b735-413d-a903-c8a215353127/volumes" Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.811043 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.860364 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="proxy-httpd" containerID="cri-o://b44ffd36c818a8f783858141a417432505a77f51a43a220fecf55e505fc3ccf0" gracePeriod=30 Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.860412 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="sg-core" containerID="cri-o://7d1dac6fce5084d483bbc177ef60f71ee934ffefb2e8330801faf82735c839eb" gracePeriod=30 Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.860533 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="ceilometer-notification-agent" containerID="cri-o://6156d77e752c61225e50e138bc025ff5a43a45248657085461f0b5b0356843c1" gracePeriod=30 Jan 21 11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.860577 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="ceilometer-central-agent" containerID="cri-o://0486ec5470f960b248dfe9771ef155ee6230ce9ca2994bc3e55ecfff96cab169" gracePeriod=30 Jan 21 
11:35:11 crc kubenswrapper[4925]: I0121 11:35:11.873823 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.200:3000/\": EOF" Jan 21 11:35:12 crc kubenswrapper[4925]: I0121 11:35:12.095339 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.200:3000/\": dial tcp 10.217.0.200:3000: connect: connection refused" Jan 21 11:35:12 crc kubenswrapper[4925]: I0121 11:35:12.921769 4925 generic.go:334] "Generic (PLEG): container finished" podID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerID="b44ffd36c818a8f783858141a417432505a77f51a43a220fecf55e505fc3ccf0" exitCode=0 Jan 21 11:35:12 crc kubenswrapper[4925]: I0121 11:35:12.921819 4925 generic.go:334] "Generic (PLEG): container finished" podID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerID="7d1dac6fce5084d483bbc177ef60f71ee934ffefb2e8330801faf82735c839eb" exitCode=2 Jan 21 11:35:12 crc kubenswrapper[4925]: I0121 11:35:12.921838 4925 generic.go:334] "Generic (PLEG): container finished" podID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerID="0486ec5470f960b248dfe9771ef155ee6230ce9ca2994bc3e55ecfff96cab169" exitCode=0 Jan 21 11:35:12 crc kubenswrapper[4925]: I0121 11:35:12.921867 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerDied","Data":"b44ffd36c818a8f783858141a417432505a77f51a43a220fecf55e505fc3ccf0"} Jan 21 11:35:12 crc kubenswrapper[4925]: I0121 11:35:12.921903 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerDied","Data":"7d1dac6fce5084d483bbc177ef60f71ee934ffefb2e8330801faf82735c839eb"} Jan 21 11:35:12 crc kubenswrapper[4925]: I0121 11:35:12.921920 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerDied","Data":"0486ec5470f960b248dfe9771ef155ee6230ce9ca2994bc3e55ecfff96cab169"} Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.012829 4925 generic.go:334] "Generic (PLEG): container finished" podID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerID="6156d77e752c61225e50e138bc025ff5a43a45248657085461f0b5b0356843c1" exitCode=0 Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.013214 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerDied","Data":"6156d77e752c61225e50e138bc025ff5a43a45248657085461f0b5b0356843c1"} Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.524010 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.669834 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-ceilometer-tls-certs\") pod \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.669922 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-run-httpd\") pod \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.669988 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-log-httpd\") pod \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.670019 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msmmg\" (UniqueName: \"kubernetes.io/projected/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-kube-api-access-msmmg\") pod \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.670062 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-scripts\") pod \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.670134 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-sg-core-conf-yaml\") pod \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.670162 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-config-data\") pod \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.670230 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-combined-ca-bundle\") pod \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\" (UID: \"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47\") " Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.671202 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" (UID: "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.671199 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" (UID: "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.676575 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-scripts" (OuterVolumeSpecName: "scripts") pod "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" (UID: "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.687845 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-kube-api-access-msmmg" (OuterVolumeSpecName: "kube-api-access-msmmg") pod "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" (UID: "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47"). InnerVolumeSpecName "kube-api-access-msmmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.695115 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" (UID: "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.723043 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" (UID: "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.746465 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" (UID: "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.771975 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.772021 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.772033 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.772045 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msmmg\" (UniqueName: \"kubernetes.io/projected/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-kube-api-access-msmmg\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.772059 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.772073 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.772084 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.772617 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-config-data" (OuterVolumeSpecName: "config-data") pod "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" (UID: "6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:14 crc kubenswrapper[4925]: I0121 11:35:14.875661 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.026758 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47","Type":"ContainerDied","Data":"a1b8c8ddbbc8fe4bf61d6a05da140bbc0063c0c0c45a2a937508dd6ab672d677"} Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.026862 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.027218 4925 scope.go:117] "RemoveContainer" containerID="b44ffd36c818a8f783858141a417432505a77f51a43a220fecf55e505fc3ccf0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.062189 4925 scope.go:117] "RemoveContainer" containerID="7d1dac6fce5084d483bbc177ef60f71ee934ffefb2e8330801faf82735c839eb" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.081678 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.090832 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.098159 4925 scope.go:117] "RemoveContainer" containerID="6156d77e752c61225e50e138bc025ff5a43a45248657085461f0b5b0356843c1" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130155 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.130808 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-api" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130837 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-api" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.130857 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd8d2220-5a18-48db-9492-e92a8033c860" containerName="extract-content" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130864 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd8d2220-5a18-48db-9492-e92a8033c860" containerName="extract-content" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.130877 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-kuttl-api-log" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130886 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-kuttl-api-log" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.130898 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="ceilometer-central-agent" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130906 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="ceilometer-central-agent" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.130921 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd8d2220-5a18-48db-9492-e92a8033c860" containerName="extract-utilities" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130928 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd8d2220-5a18-48db-9492-e92a8033c860" containerName="extract-utilities" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.130944 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="ceilometer-notification-agent" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130950 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="ceilometer-notification-agent" Jan 21 11:35:15 crc 
kubenswrapper[4925]: E0121 11:35:15.130963 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="sg-core" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130971 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="sg-core" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.130987 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9346c552-eed6-447d-aa74-db8a61311a9b" containerName="watcher-applier" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.130994 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9346c552-eed6-447d-aa74-db8a61311a9b" containerName="watcher-applier" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.131005 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a69deccc-7052-4059-b4fe-fef0f8e35b0c" containerName="watcher-decision-engine" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131012 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="a69deccc-7052-4059-b4fe-fef0f8e35b0c" containerName="watcher-decision-engine" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.131034 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="proxy-httpd" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131040 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="proxy-httpd" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.131053 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd8d2220-5a18-48db-9492-e92a8033c860" containerName="registry-server" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131060 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd8d2220-5a18-48db-9492-e92a8033c860" containerName="registry-server" Jan 21 11:35:15 crc kubenswrapper[4925]: E0121 11:35:15.131075 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11206d56-c989-474c-9b79-00449dacd0d7" containerName="mariadb-account-delete" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131084 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="11206d56-c989-474c-9b79-00449dacd0d7" containerName="mariadb-account-delete" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131279 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="11206d56-c989-474c-9b79-00449dacd0d7" containerName="mariadb-account-delete" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131298 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="a69deccc-7052-4059-b4fe-fef0f8e35b0c" containerName="watcher-decision-engine" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131316 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="proxy-httpd" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131327 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9346c552-eed6-447d-aa74-db8a61311a9b" containerName="watcher-applier" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131338 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="ceilometer-notification-agent" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131348 4925 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="bd8d2220-5a18-48db-9492-e92a8033c860" containerName="registry-server" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131357 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-kuttl-api-log" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131373 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8ab333d-b735-413d-a903-c8a215353127" containerName="watcher-api" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131384 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="ceilometer-central-agent" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.131419 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" containerName="sg-core" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.133302 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.136470 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.136700 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.137058 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.144482 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.177650 4925 scope.go:117] "RemoveContainer" containerID="0486ec5470f960b248dfe9771ef155ee6230ce9ca2994bc3e55ecfff96cab169" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.281756 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.281828 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-scripts\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.281910 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ntkv\" (UniqueName: \"kubernetes.io/projected/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-kube-api-access-8ntkv\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.281970 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 
11:35:15.282010 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.282062 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-log-httpd\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.282125 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-run-httpd\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.282183 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-config-data\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.484826 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-scripts\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.484947 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ntkv\" (UniqueName: \"kubernetes.io/projected/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-kube-api-access-8ntkv\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.485003 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.485044 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.485091 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-log-httpd\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.485162 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-run-httpd\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.485206 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-config-data\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.485229 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.488589 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-run-httpd\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.488883 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-log-httpd\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.492838 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.495088 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-config-data\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.502264 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-scripts\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.508632 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.509380 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.546092 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-8ntkv\" (UniqueName: \"kubernetes.io/projected/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-kube-api-access-8ntkv\") pod \"ceilometer-0\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.565950 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47" path="/var/lib/kubelet/pods/6112f63c-c3e0-44a9-a3d9-ca7e9ebcdd47/volumes" Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.567153 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-jzpjj"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.567264 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-jzpjj"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.571453 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher50a1-account-delete-qkq9x"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.581763 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-50a1-account-create-update-h44lh"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.597933 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher50a1-account-delete-qkq9x"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.605094 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-50a1-account-create-update-h44lh"] Jan 21 11:35:15 crc kubenswrapper[4925]: I0121 11:35:15.784204 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.325708 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.789301 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-e692-account-create-update-cdpr4"] Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.791760 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.796213 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.813530 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-dgm48"] Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.815042 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.823026 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-e692-account-create-update-cdpr4"] Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.835596 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-dgm48"] Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.910904 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ceed0c36-8aad-46f0-ae96-7d370522f137-operator-scripts\") pod \"watcher-e692-account-create-update-cdpr4\" (UID: \"ceed0c36-8aad-46f0-ae96-7d370522f137\") " pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:16 crc kubenswrapper[4925]: I0121 11:35:16.911374 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kchmv\" (UniqueName: \"kubernetes.io/projected/ceed0c36-8aad-46f0-ae96-7d370522f137-kube-api-access-kchmv\") pod \"watcher-e692-account-create-update-cdpr4\" (UID: \"ceed0c36-8aad-46f0-ae96-7d370522f137\") " pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.140906 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-operator-scripts\") pod \"watcher-db-create-dgm48\" (UID: \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\") " pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.141081 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr64n\" (UniqueName: \"kubernetes.io/projected/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-kube-api-access-tr64n\") pod \"watcher-db-create-dgm48\" (UID: \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\") " pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.141138 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ceed0c36-8aad-46f0-ae96-7d370522f137-operator-scripts\") pod \"watcher-e692-account-create-update-cdpr4\" (UID: \"ceed0c36-8aad-46f0-ae96-7d370522f137\") " pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.141591 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kchmv\" (UniqueName: \"kubernetes.io/projected/ceed0c36-8aad-46f0-ae96-7d370522f137-kube-api-access-kchmv\") pod \"watcher-e692-account-create-update-cdpr4\" (UID: \"ceed0c36-8aad-46f0-ae96-7d370522f137\") " pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.142358 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ceed0c36-8aad-46f0-ae96-7d370522f137-operator-scripts\") pod \"watcher-e692-account-create-update-cdpr4\" (UID: \"ceed0c36-8aad-46f0-ae96-7d370522f137\") " pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.162654 4925 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerStarted","Data":"2867af16bf720f653021738e7b08be931a2a35633812b05cd652d8dfddf46f03"} Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.180588 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kchmv\" (UniqueName: \"kubernetes.io/projected/ceed0c36-8aad-46f0-ae96-7d370522f137-kube-api-access-kchmv\") pod \"watcher-e692-account-create-update-cdpr4\" (UID: \"ceed0c36-8aad-46f0-ae96-7d370522f137\") " pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.245661 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-operator-scripts\") pod \"watcher-db-create-dgm48\" (UID: \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\") " pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.246213 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr64n\" (UniqueName: \"kubernetes.io/projected/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-kube-api-access-tr64n\") pod \"watcher-db-create-dgm48\" (UID: \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\") " pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.247042 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-operator-scripts\") pod \"watcher-db-create-dgm48\" (UID: \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\") " pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.315168 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tr64n\" (UniqueName: \"kubernetes.io/projected/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-kube-api-access-tr64n\") pod \"watcher-db-create-dgm48\" (UID: \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\") " pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.420597 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.456414 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.532833 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11206d56-c989-474c-9b79-00449dacd0d7" path="/var/lib/kubelet/pods/11206d56-c989-474c-9b79-00449dacd0d7/volumes" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.537475 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc" path="/var/lib/kubelet/pods/96ef9c90-9487-4fa1-9f7c-b69cbe3bfabc/volumes" Jan 21 11:35:17 crc kubenswrapper[4925]: I0121 11:35:17.538536 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99daa308-0637-441e-86ae-8692e2155898" path="/var/lib/kubelet/pods/99daa308-0637-441e-86ae-8692e2155898/volumes" Jan 21 11:35:18 crc kubenswrapper[4925]: I0121 11:35:18.158966 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-dgm48"] Jan 21 11:35:18 crc kubenswrapper[4925]: I0121 11:35:18.176534 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerStarted","Data":"01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773"} Jan 21 11:35:18 crc kubenswrapper[4925]: W0121 11:35:18.189438 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ea99ecc_e365_44f6_9e59_b4a4bea0d1d8.slice/crio-b765705bd09294241dcf2d0d56c5f544b81c0f8e716e427fb707927388de128a WatchSource:0}: Error finding container b765705bd09294241dcf2d0d56c5f544b81c0f8e716e427fb707927388de128a: Status 404 returned error can't find the container with id b765705bd09294241dcf2d0d56c5f544b81c0f8e716e427fb707927388de128a Jan 21 11:35:18 crc kubenswrapper[4925]: I0121 11:35:18.273144 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-e692-account-create-update-cdpr4"] Jan 21 11:35:19 crc kubenswrapper[4925]: I0121 11:35:19.198501 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" event={"ID":"ceed0c36-8aad-46f0-ae96-7d370522f137","Type":"ContainerStarted","Data":"cbe7355def38373967dececdef68f3770f3edddb81a7a49faa58ae53512747ff"} Jan 21 11:35:19 crc kubenswrapper[4925]: I0121 11:35:19.198876 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" event={"ID":"ceed0c36-8aad-46f0-ae96-7d370522f137","Type":"ContainerStarted","Data":"5cf5aa601223b48957397a2e3910c2d1e600172a6f2f9754110d58f32a09d213"} Jan 21 11:35:19 crc kubenswrapper[4925]: I0121 11:35:19.204333 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerStarted","Data":"cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0"} Jan 21 11:35:19 crc kubenswrapper[4925]: I0121 11:35:19.207595 4925 generic.go:334] "Generic (PLEG): container finished" podID="8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8" containerID="6a4cddc21beee0bd825ac08e15631e6c37747e29e1310fc29b36730d1fdb747a" exitCode=0 Jan 21 11:35:19 crc kubenswrapper[4925]: I0121 11:35:19.207688 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-dgm48" 
event={"ID":"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8","Type":"ContainerDied","Data":"6a4cddc21beee0bd825ac08e15631e6c37747e29e1310fc29b36730d1fdb747a"} Jan 21 11:35:19 crc kubenswrapper[4925]: I0121 11:35:19.207745 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-dgm48" event={"ID":"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8","Type":"ContainerStarted","Data":"b765705bd09294241dcf2d0d56c5f544b81c0f8e716e427fb707927388de128a"} Jan 21 11:35:19 crc kubenswrapper[4925]: I0121 11:35:19.941308 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:35:19 crc kubenswrapper[4925]: I0121 11:35:19.941757 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:35:20 crc kubenswrapper[4925]: I0121 11:35:20.219600 4925 generic.go:334] "Generic (PLEG): container finished" podID="ceed0c36-8aad-46f0-ae96-7d370522f137" containerID="cbe7355def38373967dececdef68f3770f3edddb81a7a49faa58ae53512747ff" exitCode=0 Jan 21 11:35:20 crc kubenswrapper[4925]: I0121 11:35:20.219667 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" event={"ID":"ceed0c36-8aad-46f0-ae96-7d370522f137","Type":"ContainerDied","Data":"cbe7355def38373967dececdef68f3770f3edddb81a7a49faa58ae53512747ff"} Jan 21 11:35:20 crc kubenswrapper[4925]: I0121 11:35:20.224231 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerStarted","Data":"eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89"} Jan 21 11:35:20 crc kubenswrapper[4925]: I0121 11:35:20.861204 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.018170 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.204146 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-operator-scripts\") pod \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\" (UID: \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\") " Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.204378 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tr64n\" (UniqueName: \"kubernetes.io/projected/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-kube-api-access-tr64n\") pod \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\" (UID: \"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8\") " Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.205672 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8" (UID: "8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.241367 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-kube-api-access-tr64n" (OuterVolumeSpecName: "kube-api-access-tr64n") pod "8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8" (UID: "8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8"). InnerVolumeSpecName "kube-api-access-tr64n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.259695 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.260375 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-e692-account-create-update-cdpr4" event={"ID":"ceed0c36-8aad-46f0-ae96-7d370522f137","Type":"ContainerDied","Data":"5cf5aa601223b48957397a2e3910c2d1e600172a6f2f9754110d58f32a09d213"} Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.260435 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5cf5aa601223b48957397a2e3910c2d1e600172a6f2f9754110d58f32a09d213" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.262910 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-dgm48" event={"ID":"8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8","Type":"ContainerDied","Data":"b765705bd09294241dcf2d0d56c5f544b81c0f8e716e427fb707927388de128a"} Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.262975 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b765705bd09294241dcf2d0d56c5f544b81c0f8e716e427fb707927388de128a" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.263056 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-dgm48" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.305438 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kchmv\" (UniqueName: \"kubernetes.io/projected/ceed0c36-8aad-46f0-ae96-7d370522f137-kube-api-access-kchmv\") pod \"ceed0c36-8aad-46f0-ae96-7d370522f137\" (UID: \"ceed0c36-8aad-46f0-ae96-7d370522f137\") " Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.305504 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ceed0c36-8aad-46f0-ae96-7d370522f137-operator-scripts\") pod \"ceed0c36-8aad-46f0-ae96-7d370522f137\" (UID: \"ceed0c36-8aad-46f0-ae96-7d370522f137\") " Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.306023 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tr64n\" (UniqueName: \"kubernetes.io/projected/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-kube-api-access-tr64n\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.306044 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.306527 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ceed0c36-8aad-46f0-ae96-7d370522f137-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ceed0c36-8aad-46f0-ae96-7d370522f137" (UID: "ceed0c36-8aad-46f0-ae96-7d370522f137"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.311037 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ceed0c36-8aad-46f0-ae96-7d370522f137-kube-api-access-kchmv" (OuterVolumeSpecName: "kube-api-access-kchmv") pod "ceed0c36-8aad-46f0-ae96-7d370522f137" (UID: "ceed0c36-8aad-46f0-ae96-7d370522f137"). InnerVolumeSpecName "kube-api-access-kchmv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.408503 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kchmv\" (UniqueName: \"kubernetes.io/projected/ceed0c36-8aad-46f0-ae96-7d370522f137-kube-api-access-kchmv\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:21 crc kubenswrapper[4925]: I0121 11:35:21.408899 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ceed0c36-8aad-46f0-ae96-7d370522f137-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:22 crc kubenswrapper[4925]: I0121 11:35:22.304297 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerStarted","Data":"21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72"} Jan 21 11:35:22 crc kubenswrapper[4925]: I0121 11:35:22.305430 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:22 crc kubenswrapper[4925]: I0121 11:35:22.334973 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.1200693250000002 podStartE2EDuration="7.334945641s" podCreationTimestamp="2026-01-21 11:35:15 +0000 UTC" firstStartedPulling="2026-01-21 11:35:16.33590784 +0000 UTC m=+2407.939799774" lastFinishedPulling="2026-01-21 11:35:21.550784156 +0000 UTC m=+2413.154676090" observedRunningTime="2026-01-21 11:35:22.332986119 +0000 UTC m=+2413.936878073" watchObservedRunningTime="2026-01-21 11:35:22.334945641 +0000 UTC m=+2413.938837575" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.130236 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-n5glp"] Jan 21 11:35:27 crc kubenswrapper[4925]: E0121 11:35:27.131550 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceed0c36-8aad-46f0-ae96-7d370522f137" containerName="mariadb-account-create-update" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.131572 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceed0c36-8aad-46f0-ae96-7d370522f137" containerName="mariadb-account-create-update" Jan 21 11:35:27 crc kubenswrapper[4925]: E0121 11:35:27.131602 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8" containerName="mariadb-database-create" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.131609 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8" containerName="mariadb-database-create" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.131856 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceed0c36-8aad-46f0-ae96-7d370522f137" containerName="mariadb-account-create-update" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.131918 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8" containerName="mariadb-database-create" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.132580 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.134651 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.136131 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-x9kvk" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.154110 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-n5glp"] Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.244982 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-db-sync-config-data\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.245119 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.245180 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kc5sz\" (UniqueName: \"kubernetes.io/projected/c2a51189-7332-4b67-81cf-6c974069f0f7-kube-api-access-kc5sz\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.245217 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-config-data\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.349285 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kc5sz\" (UniqueName: \"kubernetes.io/projected/c2a51189-7332-4b67-81cf-6c974069f0f7-kube-api-access-kc5sz\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.349858 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-config-data\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.350029 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-db-sync-config-data\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc 
kubenswrapper[4925]: I0121 11:35:27.350178 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.359762 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.364782 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-db-sync-config-data\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.368660 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-config-data\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.381449 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kc5sz\" (UniqueName: \"kubernetes.io/projected/c2a51189-7332-4b67-81cf-6c974069f0f7-kube-api-access-kc5sz\") pod \"watcher-kuttl-db-sync-n5glp\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:27 crc kubenswrapper[4925]: I0121 11:35:27.462357 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:28 crc kubenswrapper[4925]: I0121 11:35:28.124476 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-n5glp"] Jan 21 11:35:28 crc kubenswrapper[4925]: I0121 11:35:28.369083 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" event={"ID":"c2a51189-7332-4b67-81cf-6c974069f0f7","Type":"ContainerStarted","Data":"4a8c94821586a7ef8a896989456db3b33649dfe9689bd65bb989c0fb04130218"} Jan 21 11:35:29 crc kubenswrapper[4925]: I0121 11:35:29.381320 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" event={"ID":"c2a51189-7332-4b67-81cf-6c974069f0f7","Type":"ContainerStarted","Data":"6953899fcdf4ed8d6235cd8bfc0b3d0780f42647d8602f55be72fca6ef74ca54"} Jan 21 11:35:29 crc kubenswrapper[4925]: I0121 11:35:29.411695 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" podStartSLOduration=2.411660222 podStartE2EDuration="2.411660222s" podCreationTimestamp="2026-01-21 11:35:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:35:29.405936382 +0000 UTC m=+2421.009828316" watchObservedRunningTime="2026-01-21 11:35:29.411660222 +0000 UTC m=+2421.015552156" Jan 21 11:35:30 crc kubenswrapper[4925]: I0121 11:35:30.695652 4925 scope.go:117] "RemoveContainer" containerID="e6199904b341b7b85a7f106273a26478355bf67ec882e68c0bcc848e0117a6c8" Jan 21 11:35:32 crc kubenswrapper[4925]: I0121 11:35:32.413900 4925 generic.go:334] "Generic (PLEG): container finished" podID="c2a51189-7332-4b67-81cf-6c974069f0f7" containerID="6953899fcdf4ed8d6235cd8bfc0b3d0780f42647d8602f55be72fca6ef74ca54" exitCode=0 Jan 21 11:35:32 crc kubenswrapper[4925]: I0121 11:35:32.413979 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" event={"ID":"c2a51189-7332-4b67-81cf-6c974069f0f7","Type":"ContainerDied","Data":"6953899fcdf4ed8d6235cd8bfc0b3d0780f42647d8602f55be72fca6ef74ca54"} Jan 21 11:35:33 crc kubenswrapper[4925]: I0121 11:35:33.822850 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:33 crc kubenswrapper[4925]: I0121 11:35:33.953046 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-config-data\") pod \"c2a51189-7332-4b67-81cf-6c974069f0f7\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " Jan 21 11:35:33 crc kubenswrapper[4925]: I0121 11:35:33.953123 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-combined-ca-bundle\") pod \"c2a51189-7332-4b67-81cf-6c974069f0f7\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " Jan 21 11:35:33 crc kubenswrapper[4925]: I0121 11:35:33.953207 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kc5sz\" (UniqueName: \"kubernetes.io/projected/c2a51189-7332-4b67-81cf-6c974069f0f7-kube-api-access-kc5sz\") pod \"c2a51189-7332-4b67-81cf-6c974069f0f7\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " Jan 21 11:35:33 crc kubenswrapper[4925]: I0121 11:35:33.953274 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-db-sync-config-data\") pod \"c2a51189-7332-4b67-81cf-6c974069f0f7\" (UID: \"c2a51189-7332-4b67-81cf-6c974069f0f7\") " Jan 21 11:35:33 crc kubenswrapper[4925]: I0121 11:35:33.958901 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2a51189-7332-4b67-81cf-6c974069f0f7-kube-api-access-kc5sz" (OuterVolumeSpecName: "kube-api-access-kc5sz") pod "c2a51189-7332-4b67-81cf-6c974069f0f7" (UID: "c2a51189-7332-4b67-81cf-6c974069f0f7"). InnerVolumeSpecName "kube-api-access-kc5sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:33 crc kubenswrapper[4925]: I0121 11:35:33.958904 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c2a51189-7332-4b67-81cf-6c974069f0f7" (UID: "c2a51189-7332-4b67-81cf-6c974069f0f7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:33 crc kubenswrapper[4925]: I0121 11:35:33.983188 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2a51189-7332-4b67-81cf-6c974069f0f7" (UID: "c2a51189-7332-4b67-81cf-6c974069f0f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.005423 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-config-data" (OuterVolumeSpecName: "config-data") pod "c2a51189-7332-4b67-81cf-6c974069f0f7" (UID: "c2a51189-7332-4b67-81cf-6c974069f0f7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.056013 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.056074 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.056093 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kc5sz\" (UniqueName: \"kubernetes.io/projected/c2a51189-7332-4b67-81cf-6c974069f0f7-kube-api-access-kc5sz\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.056107 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c2a51189-7332-4b67-81cf-6c974069f0f7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.454670 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" event={"ID":"c2a51189-7332-4b67-81cf-6c974069f0f7","Type":"ContainerDied","Data":"4a8c94821586a7ef8a896989456db3b33649dfe9689bd65bb989c0fb04130218"} Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.454726 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a8c94821586a7ef8a896989456db3b33649dfe9689bd65bb989c0fb04130218" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.454767 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-n5glp" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.819717 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:35:34 crc kubenswrapper[4925]: E0121 11:35:34.820565 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2a51189-7332-4b67-81cf-6c974069f0f7" containerName="watcher-kuttl-db-sync" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.820590 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2a51189-7332-4b67-81cf-6c974069f0f7" containerName="watcher-kuttl-db-sync" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.820811 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2a51189-7332-4b67-81cf-6c974069f0f7" containerName="watcher-kuttl-db-sync" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.822055 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.825962 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-x9kvk" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.827635 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.841578 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.852961 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.858685 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.896532 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.974713 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979474 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979541 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2pwj\" (UniqueName: \"kubernetes.io/projected/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-kube-api-access-c2pwj\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979603 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979635 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979663 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979693 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89stx\" 
(UniqueName: \"kubernetes.io/projected/3ad69b0e-0d3a-4c21-979e-9078059d7c95-kube-api-access-89stx\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979723 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad69b0e-0d3a-4c21-979e-9078059d7c95-logs\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979764 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979894 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.979988 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-logs\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.980022 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.980069 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.981852 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.995084 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:35:34 crc kubenswrapper[4925]: I0121 11:35:34.998233 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082245 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082315 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082361 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082457 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082538 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-logs\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082567 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-875pw\" (UniqueName: \"kubernetes.io/projected/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-kube-api-access-875pw\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082591 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082629 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082673 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082706 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082736 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082765 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2pwj\" (UniqueName: \"kubernetes.io/projected/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-kube-api-access-c2pwj\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082799 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082822 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082850 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082873 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082902 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89stx\" 
(UniqueName: \"kubernetes.io/projected/3ad69b0e-0d3a-4c21-979e-9078059d7c95-kube-api-access-89stx\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.082930 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad69b0e-0d3a-4c21-979e-9078059d7c95-logs\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.084521 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad69b0e-0d3a-4c21-979e-9078059d7c95-logs\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.085319 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-logs\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.095745 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.095772 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.096083 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.100020 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.100787 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.106936 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " 
pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.107130 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.111340 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.112222 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2pwj\" (UniqueName: \"kubernetes.io/projected/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-kube-api-access-c2pwj\") pod \"watcher-kuttl-api-0\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.118754 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.119942 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.122071 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89stx\" (UniqueName: \"kubernetes.io/projected/3ad69b0e-0d3a-4c21-979e-9078059d7c95-kube-api-access-89stx\") pod \"watcher-kuttl-api-1\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.133697 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.142946 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.144908 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.182804 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.183992 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.184049 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.184086 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.184117 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.184134 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.184225 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-875pw\" (UniqueName: \"kubernetes.io/projected/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-kube-api-access-875pw\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.197522 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.197848 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.208011 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-cert-memcached-mtls\") pod 
\"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.222842 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.223122 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.230411 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-875pw\" (UniqueName: \"kubernetes.io/projected/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-kube-api-access-875pw\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.285707 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nck4w\" (UniqueName: \"kubernetes.io/projected/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-kube-api-access-nck4w\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.285787 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.285861 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.286008 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.286059 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.313502 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.387842 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.388326 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.388363 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.388487 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nck4w\" (UniqueName: \"kubernetes.io/projected/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-kube-api-access-nck4w\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.388563 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.395090 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.396047 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.404192 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.404767 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" 
Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.419460 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nck4w\" (UniqueName: \"kubernetes.io/projected/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-kube-api-access-nck4w\") pod \"watcher-kuttl-applier-0\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.624205 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.919802 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:35:35 crc kubenswrapper[4925]: I0121 11:35:35.960501 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:35:35 crc kubenswrapper[4925]: W0121 11:35:35.966702 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ad69b0e_0d3a_4c21_979e_9078059d7c95.slice/crio-db5376f1981766fb18e2179eb4e3e382e3ac7ee4df2f3a724f706a530b32dd2d WatchSource:0}: Error finding container db5376f1981766fb18e2179eb4e3e382e3ac7ee4df2f3a724f706a530b32dd2d: Status 404 returned error can't find the container with id db5376f1981766fb18e2179eb4e3e382e3ac7ee4df2f3a724f706a530b32dd2d Jan 21 11:35:36 crc kubenswrapper[4925]: I0121 11:35:36.105014 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:35:36 crc kubenswrapper[4925]: I0121 11:35:36.291507 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:35:36 crc kubenswrapper[4925]: I0121 11:35:36.481317 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"3ad69b0e-0d3a-4c21-979e-9078059d7c95","Type":"ContainerStarted","Data":"1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d"} Jan 21 11:35:36 crc kubenswrapper[4925]: I0121 11:35:36.481419 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"3ad69b0e-0d3a-4c21-979e-9078059d7c95","Type":"ContainerStarted","Data":"db5376f1981766fb18e2179eb4e3e382e3ac7ee4df2f3a724f706a530b32dd2d"} Jan 21 11:35:36 crc kubenswrapper[4925]: I0121 11:35:36.483547 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"efdd6f82-fbae-41fa-a61d-f92e9729b3c3","Type":"ContainerStarted","Data":"fa5a0518586966a28c7024268a32c1bc09183e560e291add84f94eb233815d86"} Jan 21 11:35:36 crc kubenswrapper[4925]: I0121 11:35:36.485703 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"38b8bc42-83a8-4f30-a976-d7ba8c7eec87","Type":"ContainerStarted","Data":"1b9b8bd03c445b15ed32d8e85c4fdc527a791572237f5990164e237a66160acf"} Jan 21 11:35:36 crc kubenswrapper[4925]: I0121 11:35:36.485763 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"38b8bc42-83a8-4f30-a976-d7ba8c7eec87","Type":"ContainerStarted","Data":"da7af09bc67c06956da70409f46e5a214a9585d6a600eee9184a030542d2241b"} Jan 21 11:35:36 crc kubenswrapper[4925]: I0121 11:35:36.487004 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a28fde9c-8bf6-4e3e-9721-7f4507fcc815","Type":"ContainerStarted","Data":"eb43523d7c91fa030b09afe77215f622f72e6cad162df718744b756d6384c9f2"} Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.526104 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"3ad69b0e-0d3a-4c21-979e-9078059d7c95","Type":"ContainerStarted","Data":"59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175"} Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.526562 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.526577 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"efdd6f82-fbae-41fa-a61d-f92e9729b3c3","Type":"ContainerStarted","Data":"383300fd3d490560750ef8967910f199d11e380624791502c07b8e87e225c821"} Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.527620 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"38b8bc42-83a8-4f30-a976-d7ba8c7eec87","Type":"ContainerStarted","Data":"3aeca06d17186a01a51f6eadfcf236ee3433b821adf2546ac831dd0c53deef69"} Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.528576 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.532239 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a28fde9c-8bf6-4e3e-9721-7f4507fcc815","Type":"ContainerStarted","Data":"2e6fdb7b69fd09a26326c729e83c0aa0bf9a552f94402a4cd2b0e74a3d4e17e1"} Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.543918 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-1" podStartSLOduration=3.543894464 podStartE2EDuration="3.543894464s" podCreationTimestamp="2026-01-21 11:35:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:35:37.542354585 +0000 UTC m=+2429.146246529" watchObservedRunningTime="2026-01-21 11:35:37.543894464 +0000 UTC m=+2429.147786398" Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.580051 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=2.580030313 podStartE2EDuration="2.580030313s" podCreationTimestamp="2026-01-21 11:35:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:35:37.573794816 +0000 UTC m=+2429.177686750" watchObservedRunningTime="2026-01-21 11:35:37.580030313 +0000 UTC m=+2429.183922247" Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.605511 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=3.605481086 podStartE2EDuration="3.605481086s" podCreationTimestamp="2026-01-21 11:35:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:35:37.597880195 +0000 UTC m=+2429.201772149" watchObservedRunningTime="2026-01-21 11:35:37.605481086 +0000 
UTC m=+2429.209373020" Jan 21 11:35:37 crc kubenswrapper[4925]: I0121 11:35:37.622871 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=3.622841583 podStartE2EDuration="3.622841583s" podCreationTimestamp="2026-01-21 11:35:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:35:37.618658621 +0000 UTC m=+2429.222550585" watchObservedRunningTime="2026-01-21 11:35:37.622841583 +0000 UTC m=+2429.226733517" Jan 21 11:35:40 crc kubenswrapper[4925]: I0121 11:35:40.146120 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:40 crc kubenswrapper[4925]: I0121 11:35:40.146638 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:35:40 crc kubenswrapper[4925]: I0121 11:35:40.184227 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:40 crc kubenswrapper[4925]: I0121 11:35:40.184370 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:35:40 crc kubenswrapper[4925]: I0121 11:35:40.624721 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:40 crc kubenswrapper[4925]: I0121 11:35:40.823806 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:41 crc kubenswrapper[4925]: I0121 11:35:41.256272 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.146122 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.170712 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.185517 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.310893 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.320741 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.358045 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.625435 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.628790 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.634647 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:35:45 crc kubenswrapper[4925]: 
I0121 11:35:45.639026 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.664764 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.670836 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:45 crc kubenswrapper[4925]: I0121 11:35:45.826628 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:46 crc kubenswrapper[4925]: I0121 11:35:46.668517 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:35:48 crc kubenswrapper[4925]: I0121 11:35:48.966014 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:48 crc kubenswrapper[4925]: I0121 11:35:48.966795 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="ceilometer-central-agent" containerID="cri-o://01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773" gracePeriod=30 Jan 21 11:35:48 crc kubenswrapper[4925]: I0121 11:35:48.966845 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="proxy-httpd" containerID="cri-o://21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72" gracePeriod=30 Jan 21 11:35:48 crc kubenswrapper[4925]: I0121 11:35:48.966916 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="sg-core" containerID="cri-o://eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89" gracePeriod=30 Jan 21 11:35:48 crc kubenswrapper[4925]: I0121 11:35:48.966969 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="ceilometer-notification-agent" containerID="cri-o://cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0" gracePeriod=30 Jan 21 11:35:49 crc kubenswrapper[4925]: I0121 11:35:49.865916 4925 generic.go:334] "Generic (PLEG): container finished" podID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerID="21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72" exitCode=0 Jan 21 11:35:49 crc kubenswrapper[4925]: I0121 11:35:49.867003 4925 generic.go:334] "Generic (PLEG): container finished" podID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerID="eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89" exitCode=2 Jan 21 11:35:49 crc kubenswrapper[4925]: I0121 11:35:49.867027 4925 generic.go:334] "Generic (PLEG): container finished" podID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerID="01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773" exitCode=0 Jan 21 11:35:49 crc kubenswrapper[4925]: I0121 11:35:49.866241 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" 
event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerDied","Data":"21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72"} Jan 21 11:35:49 crc kubenswrapper[4925]: I0121 11:35:49.867077 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerDied","Data":"eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89"} Jan 21 11:35:49 crc kubenswrapper[4925]: I0121 11:35:49.867095 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerDied","Data":"01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773"} Jan 21 11:35:49 crc kubenswrapper[4925]: I0121 11:35:49.943180 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:35:49 crc kubenswrapper[4925]: I0121 11:35:49.943279 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.649494 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.862439 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ntkv\" (UniqueName: \"kubernetes.io/projected/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-kube-api-access-8ntkv\") pod \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.867603 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-config-data\") pod \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.867934 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-sg-core-conf-yaml\") pod \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.868062 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-log-httpd\") pod \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.868212 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-ceilometer-tls-certs\") pod \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.868341 4925 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-run-httpd\") pod \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.868471 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-combined-ca-bundle\") pod \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.868619 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-scripts\") pod \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\" (UID: \"0d2ebd6e-c0d1-4547-a53d-c01b150391cf\") " Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.873174 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-kube-api-access-8ntkv" (OuterVolumeSpecName: "kube-api-access-8ntkv") pod "0d2ebd6e-c0d1-4547-a53d-c01b150391cf" (UID: "0d2ebd6e-c0d1-4547-a53d-c01b150391cf"). InnerVolumeSpecName "kube-api-access-8ntkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.873820 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0d2ebd6e-c0d1-4547-a53d-c01b150391cf" (UID: "0d2ebd6e-c0d1-4547-a53d-c01b150391cf"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.880080 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0d2ebd6e-c0d1-4547-a53d-c01b150391cf" (UID: "0d2ebd6e-c0d1-4547-a53d-c01b150391cf"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.886005 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-scripts" (OuterVolumeSpecName: "scripts") pod "0d2ebd6e-c0d1-4547-a53d-c01b150391cf" (UID: "0d2ebd6e-c0d1-4547-a53d-c01b150391cf"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.949240 4925 generic.go:334] "Generic (PLEG): container finished" podID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerID="cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0" exitCode=0 Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.949303 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerDied","Data":"cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0"} Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.949366 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"0d2ebd6e-c0d1-4547-a53d-c01b150391cf","Type":"ContainerDied","Data":"2867af16bf720f653021738e7b08be931a2a35633812b05cd652d8dfddf46f03"} Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.949387 4925 scope.go:117] "RemoveContainer" containerID="21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.949645 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.973312 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.973358 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ntkv\" (UniqueName: \"kubernetes.io/projected/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-kube-api-access-8ntkv\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.973375 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.973386 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:50 crc kubenswrapper[4925]: I0121 11:35:50.974139 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "0d2ebd6e-c0d1-4547-a53d-c01b150391cf" (UID: "0d2ebd6e-c0d1-4547-a53d-c01b150391cf"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:50.996751 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0d2ebd6e-c0d1-4547-a53d-c01b150391cf" (UID: "0d2ebd6e-c0d1-4547-a53d-c01b150391cf"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.024956 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d2ebd6e-c0d1-4547-a53d-c01b150391cf" (UID: "0d2ebd6e-c0d1-4547-a53d-c01b150391cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.049379 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-config-data" (OuterVolumeSpecName: "config-data") pod "0d2ebd6e-c0d1-4547-a53d-c01b150391cf" (UID: "0d2ebd6e-c0d1-4547-a53d-c01b150391cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.065658 4925 scope.go:117] "RemoveContainer" containerID="eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.079991 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.080045 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.080059 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.080073 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2ebd6e-c0d1-4547-a53d-c01b150391cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.085183 4925 scope.go:117] "RemoveContainer" containerID="cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.118881 4925 scope.go:117] "RemoveContainer" containerID="01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.142491 4925 scope.go:117] "RemoveContainer" containerID="21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72" Jan 21 11:35:51 crc kubenswrapper[4925]: E0121 11:35:51.143172 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72\": container with ID starting with 21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72 not found: ID does not exist" containerID="21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.143241 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72"} err="failed to get container status \"21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72\": rpc error: code = NotFound 
desc = could not find container \"21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72\": container with ID starting with 21ecb17207b61353679b2e99941cec5f3934ef36c4d4c7800e313d2edf0f2b72 not found: ID does not exist" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.143282 4925 scope.go:117] "RemoveContainer" containerID="eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89" Jan 21 11:35:51 crc kubenswrapper[4925]: E0121 11:35:51.143997 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89\": container with ID starting with eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89 not found: ID does not exist" containerID="eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.144054 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89"} err="failed to get container status \"eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89\": rpc error: code = NotFound desc = could not find container \"eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89\": container with ID starting with eeb9d0603ee4834d57967914cef7644ac105131e1655350c54b7d2ad74dabd89 not found: ID does not exist" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.144089 4925 scope.go:117] "RemoveContainer" containerID="cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0" Jan 21 11:35:51 crc kubenswrapper[4925]: E0121 11:35:51.144735 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0\": container with ID starting with cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0 not found: ID does not exist" containerID="cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.144801 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0"} err="failed to get container status \"cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0\": rpc error: code = NotFound desc = could not find container \"cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0\": container with ID starting with cbb52794cabb45efe0cc041ae465b0487e470a89375c7d1d2a9e1154003750c0 not found: ID does not exist" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.144831 4925 scope.go:117] "RemoveContainer" containerID="01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773" Jan 21 11:35:51 crc kubenswrapper[4925]: E0121 11:35:51.145458 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773\": container with ID starting with 01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773 not found: ID does not exist" containerID="01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.145504 4925 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773"} err="failed to get container status \"01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773\": rpc error: code = NotFound desc = could not find container \"01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773\": container with ID starting with 01e7c32493fbca6ad4037a75b27a82e23c2ae9b1849c2a090d0bf39801cb0773 not found: ID does not exist" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.315433 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.326720 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.367468 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:51 crc kubenswrapper[4925]: E0121 11:35:51.368125 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="ceilometer-notification-agent" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.368160 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="ceilometer-notification-agent" Jan 21 11:35:51 crc kubenswrapper[4925]: E0121 11:35:51.368184 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="proxy-httpd" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.368190 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="proxy-httpd" Jan 21 11:35:51 crc kubenswrapper[4925]: E0121 11:35:51.368224 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="ceilometer-central-agent" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.368231 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="ceilometer-central-agent" Jan 21 11:35:51 crc kubenswrapper[4925]: E0121 11:35:51.368242 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="sg-core" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.368248 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="sg-core" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.368463 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="ceilometer-notification-agent" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.368495 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="sg-core" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.368508 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="ceilometer-central-agent" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.368522 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" containerName="proxy-httpd" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.371201 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.377138 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.377158 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.377557 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.404473 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.490193 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-log-httpd\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.490261 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-run-httpd\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.490411 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.490449 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.490710 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.490835 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-scripts\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.490903 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lqbx\" (UniqueName: \"kubernetes.io/projected/3daf4d63-4a4e-460a-bdd3-14add115e2b2-kube-api-access-2lqbx\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.490954 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-config-data\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.517289 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d2ebd6e-c0d1-4547-a53d-c01b150391cf" path="/var/lib/kubelet/pods/0d2ebd6e-c0d1-4547-a53d-c01b150391cf/volumes" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.592979 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-scripts\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.593040 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lqbx\" (UniqueName: \"kubernetes.io/projected/3daf4d63-4a4e-460a-bdd3-14add115e2b2-kube-api-access-2lqbx\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.593068 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-config-data\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.593103 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-log-httpd\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.593120 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-run-httpd\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.593197 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.593218 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.593254 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 
11:35:51.594130 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-run-httpd\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.594194 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-log-httpd\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.601897 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-config-data\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.602284 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.603162 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.604090 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.613368 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-scripts\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.618579 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lqbx\" (UniqueName: \"kubernetes.io/projected/3daf4d63-4a4e-460a-bdd3-14add115e2b2-kube-api-access-2lqbx\") pod \"ceilometer-0\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:51 crc kubenswrapper[4925]: I0121 11:35:51.705418 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:52 crc kubenswrapper[4925]: I0121 11:35:52.324814 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:35:53 crc kubenswrapper[4925]: I0121 11:35:53.004522 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerStarted","Data":"519b4a476f3bea6ba4b0c2f7ab815920a1b9f74975b816167c9a1f49b8020af9"} Jan 21 11:35:54 crc kubenswrapper[4925]: I0121 11:35:54.021080 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerStarted","Data":"a4873bd8419f25628a4d06624c9c7af93a0d117dd6763d940ffccbdb07370bea"} Jan 21 11:35:55 crc kubenswrapper[4925]: I0121 11:35:55.040180 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerStarted","Data":"2d41dd849a503bf131acf8dc139591987bac9a8034f9c4781b570f088537291c"} Jan 21 11:35:56 crc kubenswrapper[4925]: I0121 11:35:56.073500 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerStarted","Data":"55cd76f2a9fa1e5b5b216e1542a7350fc67a4c8c120528918d53361e1d5fb46e"} Jan 21 11:35:58 crc kubenswrapper[4925]: I0121 11:35:58.100900 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerStarted","Data":"426fed9c2aea6aad00a0d70d99694643eb8d7aa0b1b803485ef89e80691ae95e"} Jan 21 11:35:58 crc kubenswrapper[4925]: I0121 11:35:58.102983 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:35:58 crc kubenswrapper[4925]: I0121 11:35:58.149590 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.788835762 podStartE2EDuration="7.149553707s" podCreationTimestamp="2026-01-21 11:35:51 +0000 UTC" firstStartedPulling="2026-01-21 11:35:52.342133197 +0000 UTC m=+2443.946025131" lastFinishedPulling="2026-01-21 11:35:56.702851142 +0000 UTC m=+2448.306743076" observedRunningTime="2026-01-21 11:35:58.141488902 +0000 UTC m=+2449.745380836" watchObservedRunningTime="2026-01-21 11:35:58.149553707 +0000 UTC m=+2449.753445641" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.098830 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.101824 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.248057 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h822z\" (UniqueName: \"kubernetes.io/projected/cb9f8189-67da-4024-983a-e7640b0e3bbf-kube-api-access-h822z\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.248146 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-config-data\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.248197 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-custom-prometheus-ca\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.248234 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-cert-memcached-mtls\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.248265 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb9f8189-67da-4024-983a-e7640b0e3bbf-logs\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.248281 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-combined-ca-bundle\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.287628 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.354619 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h822z\" (UniqueName: \"kubernetes.io/projected/cb9f8189-67da-4024-983a-e7640b0e3bbf-kube-api-access-h822z\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.354792 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-config-data\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.354913 4925 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-custom-prometheus-ca\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.354969 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-cert-memcached-mtls\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.355026 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb9f8189-67da-4024-983a-e7640b0e3bbf-logs\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.355051 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-combined-ca-bundle\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.358235 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb9f8189-67da-4024-983a-e7640b0e3bbf-logs\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.373510 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-custom-prometheus-ca\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.374039 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-cert-memcached-mtls\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.374697 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-combined-ca-bundle\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.375153 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-config-data\") pod \"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.390937 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h822z\" (UniqueName: \"kubernetes.io/projected/cb9f8189-67da-4024-983a-e7640b0e3bbf-kube-api-access-h822z\") pod 
\"watcher-kuttl-api-2\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:00 crc kubenswrapper[4925]: I0121 11:36:00.601569 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:01 crc kubenswrapper[4925]: I0121 11:36:01.310213 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Jan 21 11:36:02 crc kubenswrapper[4925]: I0121 11:36:02.301029 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cb9f8189-67da-4024-983a-e7640b0e3bbf","Type":"ContainerStarted","Data":"729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926"} Jan 21 11:36:02 crc kubenswrapper[4925]: I0121 11:36:02.301423 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:02 crc kubenswrapper[4925]: I0121 11:36:02.301437 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cb9f8189-67da-4024-983a-e7640b0e3bbf","Type":"ContainerStarted","Data":"59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219"} Jan 21 11:36:02 crc kubenswrapper[4925]: I0121 11:36:02.301450 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cb9f8189-67da-4024-983a-e7640b0e3bbf","Type":"ContainerStarted","Data":"6fe40802bfa012e896ead542486470844d6e1c2f0337609e5685805870cfa13e"} Jan 21 11:36:02 crc kubenswrapper[4925]: I0121 11:36:02.304093 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-2" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.213:9322/\": dial tcp 10.217.0.213:9322: connect: connection refused" Jan 21 11:36:02 crc kubenswrapper[4925]: I0121 11:36:02.323169 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-2" podStartSLOduration=2.323149332 podStartE2EDuration="2.323149332s" podCreationTimestamp="2026-01-21 11:36:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:36:02.32119912 +0000 UTC m=+2453.925091054" watchObservedRunningTime="2026-01-21 11:36:02.323149332 +0000 UTC m=+2453.927041266" Jan 21 11:36:05 crc kubenswrapper[4925]: I0121 11:36:05.601784 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:07 crc kubenswrapper[4925]: I0121 11:36:07.316380 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:10 crc kubenswrapper[4925]: I0121 11:36:10.603809 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:10 crc kubenswrapper[4925]: I0121 11:36:10.619214 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:11 crc kubenswrapper[4925]: I0121 11:36:11.521739 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:13 crc kubenswrapper[4925]: I0121 11:36:13.008606 4925 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Jan 21 11:36:13 crc kubenswrapper[4925]: I0121 11:36:13.016874 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:36:13 crc kubenswrapper[4925]: I0121 11:36:13.017997 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-kuttl-api-log" containerID="cri-o://1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d" gracePeriod=30 Jan 21 11:36:13 crc kubenswrapper[4925]: I0121 11:36:13.018121 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-api" containerID="cri-o://59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175" gracePeriod=30 Jan 21 11:36:13 crc kubenswrapper[4925]: I0121 11:36:13.566617 4925 generic.go:334] "Generic (PLEG): container finished" podID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerID="1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d" exitCode=143 Jan 21 11:36:13 crc kubenswrapper[4925]: I0121 11:36:13.566915 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-2" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-kuttl-api-log" containerID="cri-o://59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219" gracePeriod=30 Jan 21 11:36:13 crc kubenswrapper[4925]: I0121 11:36:13.567482 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"3ad69b0e-0d3a-4c21-979e-9078059d7c95","Type":"ContainerDied","Data":"1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d"} Jan 21 11:36:13 crc kubenswrapper[4925]: I0121 11:36:13.567598 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-2" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-api" containerID="cri-o://729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926" gracePeriod=30 Jan 21 11:36:14 crc kubenswrapper[4925]: I0121 11:36:14.584162 4925 generic.go:334] "Generic (PLEG): container finished" podID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerID="59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219" exitCode=143 Jan 21 11:36:14 crc kubenswrapper[4925]: I0121 11:36:14.584255 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cb9f8189-67da-4024-983a-e7640b0e3bbf","Type":"ContainerDied","Data":"59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219"} Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.082167 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.181284 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-combined-ca-bundle\") pod \"cb9f8189-67da-4024-983a-e7640b0e3bbf\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.181541 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-cert-memcached-mtls\") pod \"cb9f8189-67da-4024-983a-e7640b0e3bbf\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.181619 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-custom-prometheus-ca\") pod \"cb9f8189-67da-4024-983a-e7640b0e3bbf\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.181670 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb9f8189-67da-4024-983a-e7640b0e3bbf-logs\") pod \"cb9f8189-67da-4024-983a-e7640b0e3bbf\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.181698 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-config-data\") pod \"cb9f8189-67da-4024-983a-e7640b0e3bbf\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.181743 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h822z\" (UniqueName: \"kubernetes.io/projected/cb9f8189-67da-4024-983a-e7640b0e3bbf-kube-api-access-h822z\") pod \"cb9f8189-67da-4024-983a-e7640b0e3bbf\" (UID: \"cb9f8189-67da-4024-983a-e7640b0e3bbf\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.193489 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb9f8189-67da-4024-983a-e7640b0e3bbf-logs" (OuterVolumeSpecName: "logs") pod "cb9f8189-67da-4024-983a-e7640b0e3bbf" (UID: "cb9f8189-67da-4024-983a-e7640b0e3bbf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.230377 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb9f8189-67da-4024-983a-e7640b0e3bbf-kube-api-access-h822z" (OuterVolumeSpecName: "kube-api-access-h822z") pod "cb9f8189-67da-4024-983a-e7640b0e3bbf" (UID: "cb9f8189-67da-4024-983a-e7640b0e3bbf"). InnerVolumeSpecName "kube-api-access-h822z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.268376 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "cb9f8189-67da-4024-983a-e7640b0e3bbf" (UID: "cb9f8189-67da-4024-983a-e7640b0e3bbf"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.270583 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb9f8189-67da-4024-983a-e7640b0e3bbf" (UID: "cb9f8189-67da-4024-983a-e7640b0e3bbf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.286093 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.286522 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb9f8189-67da-4024-983a-e7640b0e3bbf-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.286639 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h822z\" (UniqueName: \"kubernetes.io/projected/cb9f8189-67da-4024-983a-e7640b0e3bbf-kube-api-access-h822z\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.286778 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.299869 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-config-data" (OuterVolumeSpecName: "config-data") pod "cb9f8189-67da-4024-983a-e7640b0e3bbf" (UID: "cb9f8189-67da-4024-983a-e7640b0e3bbf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.332326 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "cb9f8189-67da-4024-983a-e7640b0e3bbf" (UID: "cb9f8189-67da-4024-983a-e7640b0e3bbf"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.334058 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.542959 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-cert-memcached-mtls\") pod \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.543016 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-custom-prometheus-ca\") pod \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.543107 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89stx\" (UniqueName: \"kubernetes.io/projected/3ad69b0e-0d3a-4c21-979e-9078059d7c95-kube-api-access-89stx\") pod \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.543153 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-combined-ca-bundle\") pod \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.543186 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-config-data\") pod \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.543247 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad69b0e-0d3a-4c21-979e-9078059d7c95-logs\") pod \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\" (UID: \"3ad69b0e-0d3a-4c21-979e-9078059d7c95\") " Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.543659 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.543676 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9f8189-67da-4024-983a-e7640b0e3bbf-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.544091 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ad69b0e-0d3a-4c21-979e-9078059d7c95-logs" (OuterVolumeSpecName: "logs") pod "3ad69b0e-0d3a-4c21-979e-9078059d7c95" (UID: "3ad69b0e-0d3a-4c21-979e-9078059d7c95"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.572721 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ad69b0e-0d3a-4c21-979e-9078059d7c95-kube-api-access-89stx" (OuterVolumeSpecName: "kube-api-access-89stx") pod "3ad69b0e-0d3a-4c21-979e-9078059d7c95" (UID: "3ad69b0e-0d3a-4c21-979e-9078059d7c95"). 
InnerVolumeSpecName "kube-api-access-89stx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.592640 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ad69b0e-0d3a-4c21-979e-9078059d7c95" (UID: "3ad69b0e-0d3a-4c21-979e-9078059d7c95"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.610327 4925 generic.go:334] "Generic (PLEG): container finished" podID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerID="59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175" exitCode=0 Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.610486 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.613616 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "3ad69b0e-0d3a-4c21-979e-9078059d7c95" (UID: "3ad69b0e-0d3a-4c21-979e-9078059d7c95"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.624696 4925 generic.go:334] "Generic (PLEG): container finished" podID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerID="729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926" exitCode=0 Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.624812 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-2" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.649672 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89stx\" (UniqueName: \"kubernetes.io/projected/3ad69b0e-0d3a-4c21-979e-9078059d7c95-kube-api-access-89stx\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.649723 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.649735 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad69b0e-0d3a-4c21-979e-9078059d7c95-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.649748 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.686873 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"3ad69b0e-0d3a-4c21-979e-9078059d7c95","Type":"ContainerDied","Data":"59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175"} Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.686936 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"3ad69b0e-0d3a-4c21-979e-9078059d7c95","Type":"ContainerDied","Data":"db5376f1981766fb18e2179eb4e3e382e3ac7ee4df2f3a724f706a530b32dd2d"} Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.686955 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cb9f8189-67da-4024-983a-e7640b0e3bbf","Type":"ContainerDied","Data":"729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926"} Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.686972 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-2" event={"ID":"cb9f8189-67da-4024-983a-e7640b0e3bbf","Type":"ContainerDied","Data":"6fe40802bfa012e896ead542486470844d6e1c2f0337609e5685805870cfa13e"} Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.686999 4925 scope.go:117] "RemoveContainer" containerID="59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.792701 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "3ad69b0e-0d3a-4c21-979e-9078059d7c95" (UID: "3ad69b0e-0d3a-4c21-979e-9078059d7c95"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.792737 4925 scope.go:117] "RemoveContainer" containerID="1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.800832 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.810572 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-config-data" (OuterVolumeSpecName: "config-data") pod "3ad69b0e-0d3a-4c21-979e-9078059d7c95" (UID: "3ad69b0e-0d3a-4c21-979e-9078059d7c95"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.830143 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-2"] Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.843452 4925 scope.go:117] "RemoveContainer" containerID="59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175" Jan 21 11:36:15 crc kubenswrapper[4925]: E0121 11:36:15.845316 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175\": container with ID starting with 59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175 not found: ID does not exist" containerID="59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.845369 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175"} err="failed to get container status \"59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175\": rpc error: code = NotFound desc = could not find container \"59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175\": container with ID starting with 59869e1ec46a026b9fc37947bc3007172da31fc2400fb31318a95654c1ce6175 not found: ID does not exist" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.845428 4925 scope.go:117] "RemoveContainer" containerID="1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d" Jan 21 11:36:15 crc kubenswrapper[4925]: E0121 11:36:15.851759 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d\": container with ID starting with 1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d not found: ID does not exist" containerID="1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.852068 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d"} err="failed to get container status \"1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d\": rpc error: code = NotFound desc = could not find container \"1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d\": container with ID starting with 1d584ea8d2fe06e46a05db8425c5e349b6514b3f12d2cbca242596b1e40feb7d not found: ID does not exist" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.852102 4925 scope.go:117] 
"RemoveContainer" containerID="729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.854477 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.854530 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/3ad69b0e-0d3a-4c21-979e-9078059d7c95-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.905295 4925 scope.go:117] "RemoveContainer" containerID="59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219" Jan 21 11:36:15 crc kubenswrapper[4925]: E0121 11:36:15.925111 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb9f8189_67da_4024_983a_e7640b0e3bbf.slice/crio-6fe40802bfa012e896ead542486470844d6e1c2f0337609e5685805870cfa13e\": RecentStats: unable to find data in memory cache]" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.971996 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.982993 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.984867 4925 scope.go:117] "RemoveContainer" containerID="729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926" Jan 21 11:36:15 crc kubenswrapper[4925]: E0121 11:36:15.988457 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926\": container with ID starting with 729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926 not found: ID does not exist" containerID="729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.988513 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926"} err="failed to get container status \"729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926\": rpc error: code = NotFound desc = could not find container \"729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926\": container with ID starting with 729a9f666beae31e8366d4188e6ba22afa86e6eeba5eb753213fd065832a4926 not found: ID does not exist" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.988554 4925 scope.go:117] "RemoveContainer" containerID="59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219" Jan 21 11:36:15 crc kubenswrapper[4925]: E0121 11:36:15.989281 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219\": container with ID starting with 59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219 not found: ID does not exist" containerID="59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219" Jan 21 11:36:15 crc kubenswrapper[4925]: I0121 11:36:15.989338 4925 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219"} err="failed to get container status \"59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219\": rpc error: code = NotFound desc = could not find container \"59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219\": container with ID starting with 59f973f73175e1a0adc5fae14abfa565b648bded865118d2a9418c8885381219 not found: ID does not exist" Jan 21 11:36:16 crc kubenswrapper[4925]: I0121 11:36:16.497108 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:36:16 crc kubenswrapper[4925]: I0121 11:36:16.497860 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerName="watcher-api" containerID="cri-o://3aeca06d17186a01a51f6eadfcf236ee3433b821adf2546ac831dd0c53deef69" gracePeriod=30 Jan 21 11:36:16 crc kubenswrapper[4925]: I0121 11:36:16.498080 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerName="watcher-kuttl-api-log" containerID="cri-o://1b9b8bd03c445b15ed32d8e85c4fdc527a791572237f5990164e237a66160acf" gracePeriod=30 Jan 21 11:36:16 crc kubenswrapper[4925]: I0121 11:36:16.649547 4925 generic.go:334] "Generic (PLEG): container finished" podID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerID="1b9b8bd03c445b15ed32d8e85c4fdc527a791572237f5990164e237a66160acf" exitCode=143 Jan 21 11:36:16 crc kubenswrapper[4925]: I0121 11:36:16.649609 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"38b8bc42-83a8-4f30-a976-d7ba8c7eec87","Type":"ContainerDied","Data":"1b9b8bd03c445b15ed32d8e85c4fdc527a791572237f5990164e237a66160acf"} Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.530173 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" path="/var/lib/kubelet/pods/3ad69b0e-0d3a-4c21-979e-9078059d7c95/volumes" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.530901 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" path="/var/lib/kubelet/pods/cb9f8189-67da-4024-983a-e7640b0e3bbf/volumes" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.705775 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-n5glp"] Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.716281 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-n5glp"] Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.769640 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.769942 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="efdd6f82-fbae-41fa-a61d-f92e9729b3c3" containerName="watcher-decision-engine" containerID="cri-o://383300fd3d490560750ef8967910f199d11e380624791502c07b8e87e225c821" gracePeriod=30 Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.779344 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watchere692-account-delete-qngtc"] Jan 21 
11:36:17 crc kubenswrapper[4925]: E0121 11:36:17.779848 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-api" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.779877 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-api" Jan 21 11:36:17 crc kubenswrapper[4925]: E0121 11:36:17.779899 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-kuttl-api-log" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.779908 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-kuttl-api-log" Jan 21 11:36:17 crc kubenswrapper[4925]: E0121 11:36:17.779938 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-api" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.779946 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-api" Jan 21 11:36:17 crc kubenswrapper[4925]: E0121 11:36:17.779958 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-kuttl-api-log" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.779965 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-kuttl-api-log" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.780176 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-api" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.780232 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-kuttl-api-log" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.780247 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-api" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.780266 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb9f8189-67da-4024-983a-e7640b0e3bbf" containerName="watcher-kuttl-api-log" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.780950 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.793126 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watchere692-account-delete-qngtc"] Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.806032 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.806345 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="a28fde9c-8bf6-4e3e-9721-7f4507fcc815" containerName="watcher-applier" containerID="cri-o://2e6fdb7b69fd09a26326c729e83c0aa0bf9a552f94402a4cd2b0e74a3d4e17e1" gracePeriod=30 Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.833646 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jflg2\" (UniqueName: \"kubernetes.io/projected/00449f13-af2e-4b31-9be2-d2311ef8956d-kube-api-access-jflg2\") pod \"watchere692-account-delete-qngtc\" (UID: \"00449f13-af2e-4b31-9be2-d2311ef8956d\") " pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.833843 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00449f13-af2e-4b31-9be2-d2311ef8956d-operator-scripts\") pod \"watchere692-account-delete-qngtc\" (UID: \"00449f13-af2e-4b31-9be2-d2311ef8956d\") " pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.937442 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jflg2\" (UniqueName: \"kubernetes.io/projected/00449f13-af2e-4b31-9be2-d2311ef8956d-kube-api-access-jflg2\") pod \"watchere692-account-delete-qngtc\" (UID: \"00449f13-af2e-4b31-9be2-d2311ef8956d\") " pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.937775 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00449f13-af2e-4b31-9be2-d2311ef8956d-operator-scripts\") pod \"watchere692-account-delete-qngtc\" (UID: \"00449f13-af2e-4b31-9be2-d2311ef8956d\") " pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.940884 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00449f13-af2e-4b31-9be2-d2311ef8956d-operator-scripts\") pod \"watchere692-account-delete-qngtc\" (UID: \"00449f13-af2e-4b31-9be2-d2311ef8956d\") " pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:17 crc kubenswrapper[4925]: I0121 11:36:17.972199 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jflg2\" (UniqueName: \"kubernetes.io/projected/00449f13-af2e-4b31-9be2-d2311ef8956d-kube-api-access-jflg2\") pod \"watchere692-account-delete-qngtc\" (UID: \"00449f13-af2e-4b31-9be2-d2311ef8956d\") " pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:18 crc kubenswrapper[4925]: I0121 11:36:18.105611 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:18 crc kubenswrapper[4925]: I0121 11:36:18.807595 4925 generic.go:334] "Generic (PLEG): container finished" podID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerID="3aeca06d17186a01a51f6eadfcf236ee3433b821adf2546ac831dd0c53deef69" exitCode=0 Jan 21 11:36:18 crc kubenswrapper[4925]: I0121 11:36:18.807935 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"38b8bc42-83a8-4f30-a976-d7ba8c7eec87","Type":"ContainerDied","Data":"3aeca06d17186a01a51f6eadfcf236ee3433b821adf2546ac831dd0c53deef69"} Jan 21 11:36:18 crc kubenswrapper[4925]: I0121 11:36:18.850966 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watchere692-account-delete-qngtc"] Jan 21 11:36:18 crc kubenswrapper[4925]: W0121 11:36:18.898414 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00449f13_af2e_4b31_9be2_d2311ef8956d.slice/crio-16eba8b08297ed38ccb793f55f80b7e22b9885e7ab9b1878a8d408aed41acd1e WatchSource:0}: Error finding container 16eba8b08297ed38ccb793f55f80b7e22b9885e7ab9b1878a8d408aed41acd1e: Status 404 returned error can't find the container with id 16eba8b08297ed38ccb793f55f80b7e22b9885e7ab9b1878a8d408aed41acd1e Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.130522 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.380198 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2pwj\" (UniqueName: \"kubernetes.io/projected/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-kube-api-access-c2pwj\") pod \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.380381 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-combined-ca-bundle\") pod \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.380796 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-custom-prometheus-ca\") pod \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.380961 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-cert-memcached-mtls\") pod \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.381034 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-logs\") pod \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.381098 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-config-data\") pod \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\" (UID: \"38b8bc42-83a8-4f30-a976-d7ba8c7eec87\") " Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.381648 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-logs" (OuterVolumeSpecName: "logs") pod "38b8bc42-83a8-4f30-a976-d7ba8c7eec87" (UID: "38b8bc42-83a8-4f30-a976-d7ba8c7eec87"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.387315 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.410665 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-kube-api-access-c2pwj" (OuterVolumeSpecName: "kube-api-access-c2pwj") pod "38b8bc42-83a8-4f30-a976-d7ba8c7eec87" (UID: "38b8bc42-83a8-4f30-a976-d7ba8c7eec87"). InnerVolumeSpecName "kube-api-access-c2pwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.434039 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38b8bc42-83a8-4f30-a976-d7ba8c7eec87" (UID: "38b8bc42-83a8-4f30-a976-d7ba8c7eec87"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.461055 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "38b8bc42-83a8-4f30-a976-d7ba8c7eec87" (UID: "38b8bc42-83a8-4f30-a976-d7ba8c7eec87"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.464696 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-config-data" (OuterVolumeSpecName: "config-data") pod "38b8bc42-83a8-4f30-a976-d7ba8c7eec87" (UID: "38b8bc42-83a8-4f30-a976-d7ba8c7eec87"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.476851 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "38b8bc42-83a8-4f30-a976-d7ba8c7eec87" (UID: "38b8bc42-83a8-4f30-a976-d7ba8c7eec87"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.490332 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.490426 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2pwj\" (UniqueName: \"kubernetes.io/projected/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-kube-api-access-c2pwj\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.490454 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.490468 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.490479 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b8bc42-83a8-4f30-a976-d7ba8c7eec87-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.520448 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2a51189-7332-4b67-81cf-6c974069f0f7" path="/var/lib/kubelet/pods/c2a51189-7332-4b67-81cf-6c974069f0f7/volumes" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.820240 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" event={"ID":"00449f13-af2e-4b31-9be2-d2311ef8956d","Type":"ContainerStarted","Data":"c0c0e93fe76e9cbd3775b519bb8b929708723b807151a63898ee378d2a72b673"} Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.820668 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" event={"ID":"00449f13-af2e-4b31-9be2-d2311ef8956d","Type":"ContainerStarted","Data":"16eba8b08297ed38ccb793f55f80b7e22b9885e7ab9b1878a8d408aed41acd1e"} Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.824318 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"38b8bc42-83a8-4f30-a976-d7ba8c7eec87","Type":"ContainerDied","Data":"da7af09bc67c06956da70409f46e5a214a9585d6a600eee9184a030542d2241b"} Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.824415 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.824429 4925 scope.go:117] "RemoveContainer" containerID="3aeca06d17186a01a51f6eadfcf236ee3433b821adf2546ac831dd0c53deef69" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.853036 4925 scope.go:117] "RemoveContainer" containerID="1b9b8bd03c445b15ed32d8e85c4fdc527a791572237f5990164e237a66160acf" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.870632 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" podStartSLOduration=2.870610588 podStartE2EDuration="2.870610588s" podCreationTimestamp="2026-01-21 11:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:36:19.843773121 +0000 UTC m=+2471.447665055" watchObservedRunningTime="2026-01-21 11:36:19.870610588 +0000 UTC m=+2471.474502522" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.876162 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.886346 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.941489 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.941576 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.941632 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.942565 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:36:19 crc kubenswrapper[4925]: I0121 11:36:19.942690 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" gracePeriod=600 Jan 21 11:36:20 crc kubenswrapper[4925]: I0121 11:36:20.355563 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.209:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 11:36:20 crc 
kubenswrapper[4925]: I0121 11:36:20.356448 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="3ad69b0e-0d3a-4c21-979e-9078059d7c95" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.209:9322/\": dial tcp 10.217.0.209:9322: i/o timeout (Client.Timeout exceeded while awaiting headers)" Jan 21 11:36:20 crc kubenswrapper[4925]: E0121 11:36:20.534766 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:36:20 crc kubenswrapper[4925]: E0121 11:36:20.627992 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e6fdb7b69fd09a26326c729e83c0aa0bf9a552f94402a4cd2b0e74a3d4e17e1" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:36:20 crc kubenswrapper[4925]: E0121 11:36:20.629785 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e6fdb7b69fd09a26326c729e83c0aa0bf9a552f94402a4cd2b0e74a3d4e17e1" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:36:20 crc kubenswrapper[4925]: E0121 11:36:20.631232 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e6fdb7b69fd09a26326c729e83c0aa0bf9a552f94402a4cd2b0e74a3d4e17e1" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:36:20 crc kubenswrapper[4925]: E0121 11:36:20.631383 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="a28fde9c-8bf6-4e3e-9721-7f4507fcc815" containerName="watcher-applier" Jan 21 11:36:20 crc kubenswrapper[4925]: I0121 11:36:20.855168 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" exitCode=0 Jan 21 11:36:20 crc kubenswrapper[4925]: I0121 11:36:20.855219 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e"} Jan 21 11:36:20 crc kubenswrapper[4925]: I0121 11:36:20.856568 4925 scope.go:117] "RemoveContainer" containerID="14144a36600bd7d5b9a71777ea7bcad1b2af7e52667e89f48ae846cc78fbbc2d" Jan 21 11:36:20 crc kubenswrapper[4925]: I0121 11:36:20.857368 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:36:20 crc kubenswrapper[4925]: E0121 11:36:20.857750 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:36:21 crc kubenswrapper[4925]: I0121 11:36:21.515116 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" path="/var/lib/kubelet/pods/38b8bc42-83a8-4f30-a976-d7ba8c7eec87/volumes" Jan 21 11:36:21 crc kubenswrapper[4925]: I0121 11:36:21.733288 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:21 crc kubenswrapper[4925]: I0121 11:36:21.880227 4925 generic.go:334] "Generic (PLEG): container finished" podID="00449f13-af2e-4b31-9be2-d2311ef8956d" containerID="c0c0e93fe76e9cbd3775b519bb8b929708723b807151a63898ee378d2a72b673" exitCode=0 Jan 21 11:36:21 crc kubenswrapper[4925]: I0121 11:36:21.880602 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" event={"ID":"00449f13-af2e-4b31-9be2-d2311ef8956d","Type":"ContainerDied","Data":"c0c0e93fe76e9cbd3775b519bb8b929708723b807151a63898ee378d2a72b673"} Jan 21 11:36:21 crc kubenswrapper[4925]: I0121 11:36:21.883369 4925 generic.go:334] "Generic (PLEG): container finished" podID="a28fde9c-8bf6-4e3e-9721-7f4507fcc815" containerID="2e6fdb7b69fd09a26326c729e83c0aa0bf9a552f94402a4cd2b0e74a3d4e17e1" exitCode=0 Jan 21 11:36:21 crc kubenswrapper[4925]: I0121 11:36:21.883434 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a28fde9c-8bf6-4e3e-9721-7f4507fcc815","Type":"ContainerDied","Data":"2e6fdb7b69fd09a26326c729e83c0aa0bf9a552f94402a4cd2b0e74a3d4e17e1"} Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.415518 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.483029 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-combined-ca-bundle\") pod \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.483163 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-config-data\") pod \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.483196 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-cert-memcached-mtls\") pod \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.483270 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nck4w\" (UniqueName: \"kubernetes.io/projected/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-kube-api-access-nck4w\") pod \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.483304 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-logs\") pod \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\" (UID: \"a28fde9c-8bf6-4e3e-9721-7f4507fcc815\") " Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.485631 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-logs" (OuterVolumeSpecName: "logs") pod "a28fde9c-8bf6-4e3e-9721-7f4507fcc815" (UID: "a28fde9c-8bf6-4e3e-9721-7f4507fcc815"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.511281 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-kube-api-access-nck4w" (OuterVolumeSpecName: "kube-api-access-nck4w") pod "a28fde9c-8bf6-4e3e-9721-7f4507fcc815" (UID: "a28fde9c-8bf6-4e3e-9721-7f4507fcc815"). InnerVolumeSpecName "kube-api-access-nck4w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.517520 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a28fde9c-8bf6-4e3e-9721-7f4507fcc815" (UID: "a28fde9c-8bf6-4e3e-9721-7f4507fcc815"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.567533 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-config-data" (OuterVolumeSpecName: "config-data") pod "a28fde9c-8bf6-4e3e-9721-7f4507fcc815" (UID: "a28fde9c-8bf6-4e3e-9721-7f4507fcc815"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.582524 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "a28fde9c-8bf6-4e3e-9721-7f4507fcc815" (UID: "a28fde9c-8bf6-4e3e-9721-7f4507fcc815"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.585579 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.585624 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.585637 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nck4w\" (UniqueName: \"kubernetes.io/projected/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-kube-api-access-nck4w\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.585647 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.585743 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a28fde9c-8bf6-4e3e-9721-7f4507fcc815-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.902663 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"a28fde9c-8bf6-4e3e-9721-7f4507fcc815","Type":"ContainerDied","Data":"eb43523d7c91fa030b09afe77215f622f72e6cad162df718744b756d6384c9f2"} Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.903182 4925 scope.go:117] "RemoveContainer" containerID="2e6fdb7b69fd09a26326c729e83c0aa0bf9a552f94402a4cd2b0e74a3d4e17e1" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.902809 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:22 crc kubenswrapper[4925]: I0121 11:36:22.965712 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:36:23 crc kubenswrapper[4925]: I0121 11:36:23.103336 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:36:23 crc kubenswrapper[4925]: I0121 11:36:23.530066 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a28fde9c-8bf6-4e3e-9721-7f4507fcc815" path="/var/lib/kubelet/pods/a28fde9c-8bf6-4e3e-9721-7f4507fcc815/volumes" Jan 21 11:36:23 crc kubenswrapper[4925]: I0121 11:36:23.981167 4925 generic.go:334] "Generic (PLEG): container finished" podID="efdd6f82-fbae-41fa-a61d-f92e9729b3c3" containerID="383300fd3d490560750ef8967910f199d11e380624791502c07b8e87e225c821" exitCode=0 Jan 21 11:36:23 crc kubenswrapper[4925]: I0121 11:36:23.981289 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"efdd6f82-fbae-41fa-a61d-f92e9729b3c3","Type":"ContainerDied","Data":"383300fd3d490560750ef8967910f199d11e380624791502c07b8e87e225c821"} Jan 21 11:36:23 crc kubenswrapper[4925]: I0121 11:36:23.989102 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.021943 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" event={"ID":"00449f13-af2e-4b31-9be2-d2311ef8956d","Type":"ContainerDied","Data":"16eba8b08297ed38ccb793f55f80b7e22b9885e7ab9b1878a8d408aed41acd1e"} Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.021986 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16eba8b08297ed38ccb793f55f80b7e22b9885e7ab9b1878a8d408aed41acd1e" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.022076 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watchere692-account-delete-qngtc" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.044134 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jflg2\" (UniqueName: \"kubernetes.io/projected/00449f13-af2e-4b31-9be2-d2311ef8956d-kube-api-access-jflg2\") pod \"00449f13-af2e-4b31-9be2-d2311ef8956d\" (UID: \"00449f13-af2e-4b31-9be2-d2311ef8956d\") " Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.044247 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00449f13-af2e-4b31-9be2-d2311ef8956d-operator-scripts\") pod \"00449f13-af2e-4b31-9be2-d2311ef8956d\" (UID: \"00449f13-af2e-4b31-9be2-d2311ef8956d\") " Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.046376 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00449f13-af2e-4b31-9be2-d2311ef8956d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "00449f13-af2e-4b31-9be2-d2311ef8956d" (UID: "00449f13-af2e-4b31-9be2-d2311ef8956d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.047063 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00449f13-af2e-4b31-9be2-d2311ef8956d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.083664 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00449f13-af2e-4b31-9be2-d2311ef8956d-kube-api-access-jflg2" (OuterVolumeSpecName: "kube-api-access-jflg2") pod "00449f13-af2e-4b31-9be2-d2311ef8956d" (UID: "00449f13-af2e-4b31-9be2-d2311ef8956d"). InnerVolumeSpecName "kube-api-access-jflg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.149673 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jflg2\" (UniqueName: \"kubernetes.io/projected/00449f13-af2e-4b31-9be2-d2311ef8956d-kube-api-access-jflg2\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.597475 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.717232 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-logs\") pod \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.717502 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-combined-ca-bundle\") pod \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.717559 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-custom-prometheus-ca\") pod \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.717592 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-cert-memcached-mtls\") pod \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.717612 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-logs" (OuterVolumeSpecName: "logs") pod "efdd6f82-fbae-41fa-a61d-f92e9729b3c3" (UID: "efdd6f82-fbae-41fa-a61d-f92e9729b3c3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.717665 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-config-data\") pod \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.717806 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-875pw\" (UniqueName: \"kubernetes.io/projected/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-kube-api-access-875pw\") pod \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\" (UID: \"efdd6f82-fbae-41fa-a61d-f92e9729b3c3\") " Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.719049 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.722129 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-kube-api-access-875pw" (OuterVolumeSpecName: "kube-api-access-875pw") pod "efdd6f82-fbae-41fa-a61d-f92e9729b3c3" (UID: "efdd6f82-fbae-41fa-a61d-f92e9729b3c3"). InnerVolumeSpecName "kube-api-access-875pw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.746606 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "efdd6f82-fbae-41fa-a61d-f92e9729b3c3" (UID: "efdd6f82-fbae-41fa-a61d-f92e9729b3c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.756599 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "efdd6f82-fbae-41fa-a61d-f92e9729b3c3" (UID: "efdd6f82-fbae-41fa-a61d-f92e9729b3c3"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.773782 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-config-data" (OuterVolumeSpecName: "config-data") pod "efdd6f82-fbae-41fa-a61d-f92e9729b3c3" (UID: "efdd6f82-fbae-41fa-a61d-f92e9729b3c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.797250 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "efdd6f82-fbae-41fa-a61d-f92e9729b3c3" (UID: "efdd6f82-fbae-41fa-a61d-f92e9729b3c3"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.820373 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.820995 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.821014 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.821024 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:24 crc kubenswrapper[4925]: I0121 11:36:24.821036 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-875pw\" (UniqueName: \"kubernetes.io/projected/efdd6f82-fbae-41fa-a61d-f92e9729b3c3-kube-api-access-875pw\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.034410 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"efdd6f82-fbae-41fa-a61d-f92e9729b3c3","Type":"ContainerDied","Data":"fa5a0518586966a28c7024268a32c1bc09183e560e291add84f94eb233815d86"} Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.034479 4925 scope.go:117] "RemoveContainer" containerID="383300fd3d490560750ef8967910f199d11e380624791502c07b8e87e225c821" Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.034486 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.072079 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.081174 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.336356 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.336855 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="ceilometer-central-agent" containerID="cri-o://a4873bd8419f25628a4d06624c9c7af93a0d117dd6763d940ffccbdb07370bea" gracePeriod=30 Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.337477 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="proxy-httpd" containerID="cri-o://426fed9c2aea6aad00a0d70d99694643eb8d7aa0b1b803485ef89e80691ae95e" gracePeriod=30 Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.337558 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="sg-core" containerID="cri-o://55cd76f2a9fa1e5b5b216e1542a7350fc67a4c8c120528918d53361e1d5fb46e" gracePeriod=30 Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.337617 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="ceilometer-notification-agent" containerID="cri-o://2d41dd849a503bf131acf8dc139591987bac9a8034f9c4781b570f088537291c" gracePeriod=30 Jan 21 11:36:25 crc kubenswrapper[4925]: I0121 11:36:25.513734 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd6f82-fbae-41fa-a61d-f92e9729b3c3" path="/var/lib/kubelet/pods/efdd6f82-fbae-41fa-a61d-f92e9729b3c3/volumes" Jan 21 11:36:26 crc kubenswrapper[4925]: I0121 11:36:26.058755 4925 generic.go:334] "Generic (PLEG): container finished" podID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerID="426fed9c2aea6aad00a0d70d99694643eb8d7aa0b1b803485ef89e80691ae95e" exitCode=0 Jan 21 11:36:26 crc kubenswrapper[4925]: I0121 11:36:26.058806 4925 generic.go:334] "Generic (PLEG): container finished" podID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerID="55cd76f2a9fa1e5b5b216e1542a7350fc67a4c8c120528918d53361e1d5fb46e" exitCode=2 Jan 21 11:36:26 crc kubenswrapper[4925]: I0121 11:36:26.058908 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerDied","Data":"426fed9c2aea6aad00a0d70d99694643eb8d7aa0b1b803485ef89e80691ae95e"} Jan 21 11:36:26 crc kubenswrapper[4925]: I0121 11:36:26.058965 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerDied","Data":"55cd76f2a9fa1e5b5b216e1542a7350fc67a4c8c120528918d53361e1d5fb46e"} Jan 21 11:36:27 crc kubenswrapper[4925]: I0121 11:36:27.086719 4925 generic.go:334] "Generic (PLEG): container finished" 
podID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerID="a4873bd8419f25628a4d06624c9c7af93a0d117dd6763d940ffccbdb07370bea" exitCode=0 Jan 21 11:36:27 crc kubenswrapper[4925]: I0121 11:36:27.086834 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerDied","Data":"a4873bd8419f25628a4d06624c9c7af93a0d117dd6763d940ffccbdb07370bea"} Jan 21 11:36:27 crc kubenswrapper[4925]: I0121 11:36:27.980907 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-dgm48"] Jan 21 11:36:27 crc kubenswrapper[4925]: I0121 11:36:27.988100 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-dgm48"] Jan 21 11:36:27 crc kubenswrapper[4925]: I0121 11:36:27.998405 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watchere692-account-delete-qngtc"] Jan 21 11:36:28 crc kubenswrapper[4925]: I0121 11:36:28.012239 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-e692-account-create-update-cdpr4"] Jan 21 11:36:28 crc kubenswrapper[4925]: I0121 11:36:28.019116 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watchere692-account-delete-qngtc"] Jan 21 11:36:28 crc kubenswrapper[4925]: I0121 11:36:28.025317 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-e692-account-create-update-cdpr4"] Jan 21 11:36:29 crc kubenswrapper[4925]: I0121 11:36:29.516009 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00449f13-af2e-4b31-9be2-d2311ef8956d" path="/var/lib/kubelet/pods/00449f13-af2e-4b31-9be2-d2311ef8956d/volumes" Jan 21 11:36:29 crc kubenswrapper[4925]: I0121 11:36:29.517236 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8" path="/var/lib/kubelet/pods/8ea99ecc-e365-44f6-9e59-b4a4bea0d1d8/volumes" Jan 21 11:36:29 crc kubenswrapper[4925]: I0121 11:36:29.517990 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ceed0c36-8aad-46f0-ae96-7d370522f137" path="/var/lib/kubelet/pods/ceed0c36-8aad-46f0-ae96-7d370522f137/volumes" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.123927 4925 generic.go:334] "Generic (PLEG): container finished" podID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerID="2d41dd849a503bf131acf8dc139591987bac9a8034f9c4781b570f088537291c" exitCode=0 Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.124097 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerDied","Data":"2d41dd849a503bf131acf8dc139591987bac9a8034f9c4781b570f088537291c"} Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.124343 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3daf4d63-4a4e-460a-bdd3-14add115e2b2","Type":"ContainerDied","Data":"519b4a476f3bea6ba4b0c2f7ab815920a1b9f74975b816167c9a1f49b8020af9"} Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.124365 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="519b4a476f3bea6ba4b0c2f7ab815920a1b9f74975b816167c9a1f49b8020af9" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.161238 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.305906 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-config-data\") pod \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.306007 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-combined-ca-bundle\") pod \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.306056 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-scripts\") pod \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.306116 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lqbx\" (UniqueName: \"kubernetes.io/projected/3daf4d63-4a4e-460a-bdd3-14add115e2b2-kube-api-access-2lqbx\") pod \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.306189 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-run-httpd\") pod \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.306228 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-log-httpd\") pod \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.306287 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-ceilometer-tls-certs\") pod \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.306329 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-sg-core-conf-yaml\") pod \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\" (UID: \"3daf4d63-4a4e-460a-bdd3-14add115e2b2\") " Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.307937 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3daf4d63-4a4e-460a-bdd3-14add115e2b2" (UID: "3daf4d63-4a4e-460a-bdd3-14add115e2b2"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.308345 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3daf4d63-4a4e-460a-bdd3-14add115e2b2" (UID: "3daf4d63-4a4e-460a-bdd3-14add115e2b2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.314200 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-scripts" (OuterVolumeSpecName: "scripts") pod "3daf4d63-4a4e-460a-bdd3-14add115e2b2" (UID: "3daf4d63-4a4e-460a-bdd3-14add115e2b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.332487 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3daf4d63-4a4e-460a-bdd3-14add115e2b2-kube-api-access-2lqbx" (OuterVolumeSpecName: "kube-api-access-2lqbx") pod "3daf4d63-4a4e-460a-bdd3-14add115e2b2" (UID: "3daf4d63-4a4e-460a-bdd3-14add115e2b2"). InnerVolumeSpecName "kube-api-access-2lqbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.531282 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.531320 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lqbx\" (UniqueName: \"kubernetes.io/projected/3daf4d63-4a4e-460a-bdd3-14add115e2b2-kube-api-access-2lqbx\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.531336 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.531348 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3daf4d63-4a4e-460a-bdd3-14add115e2b2-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.534000 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3daf4d63-4a4e-460a-bdd3-14add115e2b2" (UID: "3daf4d63-4a4e-460a-bdd3-14add115e2b2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.541234 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3daf4d63-4a4e-460a-bdd3-14add115e2b2" (UID: "3daf4d63-4a4e-460a-bdd3-14add115e2b2"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.560001 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3daf4d63-4a4e-460a-bdd3-14add115e2b2" (UID: "3daf4d63-4a4e-460a-bdd3-14add115e2b2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.605654 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-config-data" (OuterVolumeSpecName: "config-data") pod "3daf4d63-4a4e-460a-bdd3-14add115e2b2" (UID: "3daf4d63-4a4e-460a-bdd3-14add115e2b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.632819 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.633195 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.633281 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:30 crc kubenswrapper[4925]: I0121 11:36:30.633368 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3daf4d63-4a4e-460a-bdd3-14add115e2b2-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.132012 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.169183 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.178306 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.200873 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201330 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="sg-core" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201359 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="sg-core" Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201385 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="proxy-httpd" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201414 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="proxy-httpd" Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201423 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28fde9c-8bf6-4e3e-9721-7f4507fcc815" containerName="watcher-applier" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201432 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28fde9c-8bf6-4e3e-9721-7f4507fcc815" containerName="watcher-applier" Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201445 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="ceilometer-central-agent" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201453 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="ceilometer-central-agent" Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201466 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerName="watcher-kuttl-api-log" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201473 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerName="watcher-kuttl-api-log" Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201491 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="ceilometer-notification-agent" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201498 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="ceilometer-notification-agent" Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201516 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00449f13-af2e-4b31-9be2-d2311ef8956d" containerName="mariadb-account-delete" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201525 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="00449f13-af2e-4b31-9be2-d2311ef8956d" containerName="mariadb-account-delete" Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201535 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efdd6f82-fbae-41fa-a61d-f92e9729b3c3" 
containerName="watcher-decision-engine" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201542 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="efdd6f82-fbae-41fa-a61d-f92e9729b3c3" containerName="watcher-decision-engine" Jan 21 11:36:31 crc kubenswrapper[4925]: E0121 11:36:31.201554 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerName="watcher-api" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201562 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerName="watcher-api" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201781 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerName="watcher-api" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201801 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="efdd6f82-fbae-41fa-a61d-f92e9729b3c3" containerName="watcher-decision-engine" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201814 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="proxy-httpd" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201826 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="ceilometer-notification-agent" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201842 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="ceilometer-central-agent" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201853 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="00449f13-af2e-4b31-9be2-d2311ef8956d" containerName="mariadb-account-delete" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201871 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="38b8bc42-83a8-4f30-a976-d7ba8c7eec87" containerName="watcher-kuttl-api-log" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201884 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="a28fde9c-8bf6-4e3e-9721-7f4507fcc815" containerName="watcher-applier" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.201897 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" containerName="sg-core" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.203987 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.207249 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.207847 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.208004 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.220513 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.245305 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.245565 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-run-httpd\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.245600 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.245642 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-config-data\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.245701 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.245731 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmr46\" (UniqueName: \"kubernetes.io/projected/f5a286aa-0a25-4bfc-b5b6-c38d70648300-kube-api-access-rmr46\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.245785 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-log-httpd\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.245811 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-scripts\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.347245 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.347298 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmr46\" (UniqueName: \"kubernetes.io/projected/f5a286aa-0a25-4bfc-b5b6-c38d70648300-kube-api-access-rmr46\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.347348 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-log-httpd\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.347980 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-scripts\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.347917 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-log-httpd\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.348075 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.348142 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-run-httpd\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.348578 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.348617 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-config-data\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " 
pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.348677 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-run-httpd\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.353993 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.354004 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.355352 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-scripts\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.355906 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.357211 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-config-data\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.371622 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmr46\" (UniqueName: \"kubernetes.io/projected/f5a286aa-0a25-4bfc-b5b6-c38d70648300-kube-api-access-rmr46\") pod \"ceilometer-0\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.518534 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3daf4d63-4a4e-460a-bdd3-14add115e2b2" path="/var/lib/kubelet/pods/3daf4d63-4a4e-460a-bdd3-14add115e2b2/volumes" Jan 21 11:36:31 crc kubenswrapper[4925]: I0121 11:36:31.527588 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.046711 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.147573 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerStarted","Data":"e3ec47fd35cff81315868557d82392ac25d562fa00104c830d97df4ffc2e9713"} Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.617588 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:36:32 crc kubenswrapper[4925]: E0121 11:36:32.618292 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.667059 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-q664r"] Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.673215 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.694525 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-q664r"] Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.781828 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-6761-account-create-update-8lfh8"] Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.783374 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.787970 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.797149 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-6761-account-create-update-8lfh8"] Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.824867 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ph7d\" (UniqueName: \"kubernetes.io/projected/e8275c69-592d-4a37-a63b-5797f3d156f4-kube-api-access-5ph7d\") pod \"watcher-db-create-q664r\" (UID: \"e8275c69-592d-4a37-a63b-5797f3d156f4\") " pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.824989 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8275c69-592d-4a37-a63b-5797f3d156f4-operator-scripts\") pod \"watcher-db-create-q664r\" (UID: \"e8275c69-592d-4a37-a63b-5797f3d156f4\") " pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.927048 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8275c69-592d-4a37-a63b-5797f3d156f4-operator-scripts\") pod \"watcher-db-create-q664r\" (UID: \"e8275c69-592d-4a37-a63b-5797f3d156f4\") " pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.927170 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh657\" (UniqueName: \"kubernetes.io/projected/d621d4c5-909d-4fc8-8113-d898b0f87caf-kube-api-access-jh657\") pod \"watcher-6761-account-create-update-8lfh8\" (UID: \"d621d4c5-909d-4fc8-8113-d898b0f87caf\") " pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.927280 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ph7d\" (UniqueName: \"kubernetes.io/projected/e8275c69-592d-4a37-a63b-5797f3d156f4-kube-api-access-5ph7d\") pod \"watcher-db-create-q664r\" (UID: \"e8275c69-592d-4a37-a63b-5797f3d156f4\") " pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.927370 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d621d4c5-909d-4fc8-8113-d898b0f87caf-operator-scripts\") pod \"watcher-6761-account-create-update-8lfh8\" (UID: \"d621d4c5-909d-4fc8-8113-d898b0f87caf\") " pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.928220 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8275c69-592d-4a37-a63b-5797f3d156f4-operator-scripts\") pod \"watcher-db-create-q664r\" (UID: \"e8275c69-592d-4a37-a63b-5797f3d156f4\") " pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:32 crc kubenswrapper[4925]: I0121 11:36:32.954652 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ph7d\" 
(UniqueName: \"kubernetes.io/projected/e8275c69-592d-4a37-a63b-5797f3d156f4-kube-api-access-5ph7d\") pod \"watcher-db-create-q664r\" (UID: \"e8275c69-592d-4a37-a63b-5797f3d156f4\") " pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:33 crc kubenswrapper[4925]: I0121 11:36:33.002646 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:33 crc kubenswrapper[4925]: I0121 11:36:33.030169 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d621d4c5-909d-4fc8-8113-d898b0f87caf-operator-scripts\") pod \"watcher-6761-account-create-update-8lfh8\" (UID: \"d621d4c5-909d-4fc8-8113-d898b0f87caf\") " pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:33 crc kubenswrapper[4925]: I0121 11:36:33.031209 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d621d4c5-909d-4fc8-8113-d898b0f87caf-operator-scripts\") pod \"watcher-6761-account-create-update-8lfh8\" (UID: \"d621d4c5-909d-4fc8-8113-d898b0f87caf\") " pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:33 crc kubenswrapper[4925]: I0121 11:36:33.033099 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh657\" (UniqueName: \"kubernetes.io/projected/d621d4c5-909d-4fc8-8113-d898b0f87caf-kube-api-access-jh657\") pod \"watcher-6761-account-create-update-8lfh8\" (UID: \"d621d4c5-909d-4fc8-8113-d898b0f87caf\") " pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:33 crc kubenswrapper[4925]: I0121 11:36:33.057556 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh657\" (UniqueName: \"kubernetes.io/projected/d621d4c5-909d-4fc8-8113-d898b0f87caf-kube-api-access-jh657\") pod \"watcher-6761-account-create-update-8lfh8\" (UID: \"d621d4c5-909d-4fc8-8113-d898b0f87caf\") " pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:33 crc kubenswrapper[4925]: I0121 11:36:33.115346 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:33 crc kubenswrapper[4925]: I0121 11:36:33.527477 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-q664r"] Jan 21 11:36:33 crc kubenswrapper[4925]: I0121 11:36:33.923576 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-6761-account-create-update-8lfh8"] Jan 21 11:36:33 crc kubenswrapper[4925]: W0121 11:36:33.936349 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd621d4c5_909d_4fc8_8113_d898b0f87caf.slice/crio-28afffeec039529c1db248b6b5ee21cf0c910548b512dd242561b53ececd610f WatchSource:0}: Error finding container 28afffeec039529c1db248b6b5ee21cf0c910548b512dd242561b53ececd610f: Status 404 returned error can't find the container with id 28afffeec039529c1db248b6b5ee21cf0c910548b512dd242561b53ececd610f Jan 21 11:36:34 crc kubenswrapper[4925]: I0121 11:36:34.487013 4925 generic.go:334] "Generic (PLEG): container finished" podID="e8275c69-592d-4a37-a63b-5797f3d156f4" containerID="7359f9cad1c1cf7ec0b32e455553d9d3b3ad9148115ee60a183c032da0d9f0b9" exitCode=0 Jan 21 11:36:34 crc kubenswrapper[4925]: I0121 11:36:34.487119 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-q664r" event={"ID":"e8275c69-592d-4a37-a63b-5797f3d156f4","Type":"ContainerDied","Data":"7359f9cad1c1cf7ec0b32e455553d9d3b3ad9148115ee60a183c032da0d9f0b9"} Jan 21 11:36:34 crc kubenswrapper[4925]: I0121 11:36:34.487170 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-q664r" event={"ID":"e8275c69-592d-4a37-a63b-5797f3d156f4","Type":"ContainerStarted","Data":"08b948a4fb0598b87302bb16b698c01af16d3126dbdaa5f1036c6d342054dd64"} Jan 21 11:36:34 crc kubenswrapper[4925]: I0121 11:36:34.495078 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerStarted","Data":"2fce1b41fadfa18a23dc358b1e9d9f5222ebac9f3f5075a02b80593064c37b9c"} Jan 21 11:36:34 crc kubenswrapper[4925]: I0121 11:36:34.498121 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" event={"ID":"d621d4c5-909d-4fc8-8113-d898b0f87caf","Type":"ContainerStarted","Data":"89e6319de72d495e73439f260b648b87927152fb793b072e3744077e4be2fb2d"} Jan 21 11:36:34 crc kubenswrapper[4925]: I0121 11:36:34.498185 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" event={"ID":"d621d4c5-909d-4fc8-8113-d898b0f87caf","Type":"ContainerStarted","Data":"28afffeec039529c1db248b6b5ee21cf0c910548b512dd242561b53ececd610f"} Jan 21 11:36:34 crc kubenswrapper[4925]: I0121 11:36:34.540575 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" podStartSLOduration=2.540550005 podStartE2EDuration="2.540550005s" podCreationTimestamp="2026-01-21 11:36:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:36:34.530845238 +0000 UTC m=+2486.134737172" watchObservedRunningTime="2026-01-21 11:36:34.540550005 +0000 UTC m=+2486.144441939" Jan 21 11:36:35 crc kubenswrapper[4925]: I0121 11:36:35.533806 4925 
generic.go:334] "Generic (PLEG): container finished" podID="d621d4c5-909d-4fc8-8113-d898b0f87caf" containerID="89e6319de72d495e73439f260b648b87927152fb793b072e3744077e4be2fb2d" exitCode=0 Jan 21 11:36:35 crc kubenswrapper[4925]: I0121 11:36:35.534376 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" event={"ID":"d621d4c5-909d-4fc8-8113-d898b0f87caf","Type":"ContainerDied","Data":"89e6319de72d495e73439f260b648b87927152fb793b072e3744077e4be2fb2d"} Jan 21 11:36:35 crc kubenswrapper[4925]: I0121 11:36:35.543691 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerStarted","Data":"f4485638eafd70a7f2f4fa1a49b01bd8bc56d4ce4a64cc33ac9fc63b4ce997ad"} Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.350984 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.467595 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ph7d\" (UniqueName: \"kubernetes.io/projected/e8275c69-592d-4a37-a63b-5797f3d156f4-kube-api-access-5ph7d\") pod \"e8275c69-592d-4a37-a63b-5797f3d156f4\" (UID: \"e8275c69-592d-4a37-a63b-5797f3d156f4\") " Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.467744 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8275c69-592d-4a37-a63b-5797f3d156f4-operator-scripts\") pod \"e8275c69-592d-4a37-a63b-5797f3d156f4\" (UID: \"e8275c69-592d-4a37-a63b-5797f3d156f4\") " Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.468875 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8275c69-592d-4a37-a63b-5797f3d156f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8275c69-592d-4a37-a63b-5797f3d156f4" (UID: "e8275c69-592d-4a37-a63b-5797f3d156f4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.474915 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8275c69-592d-4a37-a63b-5797f3d156f4-kube-api-access-5ph7d" (OuterVolumeSpecName: "kube-api-access-5ph7d") pod "e8275c69-592d-4a37-a63b-5797f3d156f4" (UID: "e8275c69-592d-4a37-a63b-5797f3d156f4"). InnerVolumeSpecName "kube-api-access-5ph7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.563755 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-q664r" event={"ID":"e8275c69-592d-4a37-a63b-5797f3d156f4","Type":"ContainerDied","Data":"08b948a4fb0598b87302bb16b698c01af16d3126dbdaa5f1036c6d342054dd64"} Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.563815 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08b948a4fb0598b87302bb16b698c01af16d3126dbdaa5f1036c6d342054dd64" Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.563908 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-q664r" Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.733379 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerStarted","Data":"704c35b12a1178c80ba08580804e4fe2a313713d9fdf24e744bf974caa304547"} Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.737639 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ph7d\" (UniqueName: \"kubernetes.io/projected/e8275c69-592d-4a37-a63b-5797f3d156f4-kube-api-access-5ph7d\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:36 crc kubenswrapper[4925]: I0121 11:36:36.737686 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8275c69-592d-4a37-a63b-5797f3d156f4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.156921 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.366441 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jh657\" (UniqueName: \"kubernetes.io/projected/d621d4c5-909d-4fc8-8113-d898b0f87caf-kube-api-access-jh657\") pod \"d621d4c5-909d-4fc8-8113-d898b0f87caf\" (UID: \"d621d4c5-909d-4fc8-8113-d898b0f87caf\") " Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.366877 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d621d4c5-909d-4fc8-8113-d898b0f87caf-operator-scripts\") pod \"d621d4c5-909d-4fc8-8113-d898b0f87caf\" (UID: \"d621d4c5-909d-4fc8-8113-d898b0f87caf\") " Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.368816 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d621d4c5-909d-4fc8-8113-d898b0f87caf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d621d4c5-909d-4fc8-8113-d898b0f87caf" (UID: "d621d4c5-909d-4fc8-8113-d898b0f87caf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.406364 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d621d4c5-909d-4fc8-8113-d898b0f87caf-kube-api-access-jh657" (OuterVolumeSpecName: "kube-api-access-jh657") pod "d621d4c5-909d-4fc8-8113-d898b0f87caf" (UID: "d621d4c5-909d-4fc8-8113-d898b0f87caf"). InnerVolumeSpecName "kube-api-access-jh657". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.469849 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jh657\" (UniqueName: \"kubernetes.io/projected/d621d4c5-909d-4fc8-8113-d898b0f87caf-kube-api-access-jh657\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.469911 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d621d4c5-909d-4fc8-8113-d898b0f87caf-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.744632 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" event={"ID":"d621d4c5-909d-4fc8-8113-d898b0f87caf","Type":"ContainerDied","Data":"28afffeec039529c1db248b6b5ee21cf0c910548b512dd242561b53ececd610f"} Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.744690 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28afffeec039529c1db248b6b5ee21cf0c910548b512dd242561b53ececd610f" Jan 21 11:36:37 crc kubenswrapper[4925]: I0121 11:36:37.744686 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-6761-account-create-update-8lfh8" Jan 21 11:36:38 crc kubenswrapper[4925]: I0121 11:36:38.760253 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerStarted","Data":"14abbcfe5eca6b94cd1fa1dab9428c7d148448bab3fb61ec349d9ca8155d032c"} Jan 21 11:36:38 crc kubenswrapper[4925]: I0121 11:36:38.762210 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:36:38 crc kubenswrapper[4925]: I0121 11:36:38.804151 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.905764409 podStartE2EDuration="7.804122868s" podCreationTimestamp="2026-01-21 11:36:31 +0000 UTC" firstStartedPulling="2026-01-21 11:36:32.057909316 +0000 UTC m=+2483.661801250" lastFinishedPulling="2026-01-21 11:36:37.956267765 +0000 UTC m=+2489.560159709" observedRunningTime="2026-01-21 11:36:38.79278872 +0000 UTC m=+2490.396680664" watchObservedRunningTime="2026-01-21 11:36:38.804122868 +0000 UTC m=+2490.408014802" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.286087 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx"] Jan 21 11:36:43 crc kubenswrapper[4925]: E0121 11:36:43.287032 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8275c69-592d-4a37-a63b-5797f3d156f4" containerName="mariadb-database-create" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.287049 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8275c69-592d-4a37-a63b-5797f3d156f4" containerName="mariadb-database-create" Jan 21 11:36:43 crc kubenswrapper[4925]: E0121 11:36:43.287078 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d621d4c5-909d-4fc8-8113-d898b0f87caf" containerName="mariadb-account-create-update" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.287087 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d621d4c5-909d-4fc8-8113-d898b0f87caf" containerName="mariadb-account-create-update" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 
11:36:43.287329 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d621d4c5-909d-4fc8-8113-d898b0f87caf" containerName="mariadb-account-create-update" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.287355 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8275c69-592d-4a37-a63b-5797f3d156f4" containerName="mariadb-database-create" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.288061 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.291196 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-zsgbh" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.291765 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.309528 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx"] Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.481988 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-config-data\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.482082 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.482455 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-db-sync-config-data\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.482759 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fmwr\" (UniqueName: \"kubernetes.io/projected/9f25435a-1493-4958-846e-4d0ccffc4ba0-kube-api-access-9fmwr\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.584013 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-db-sync-config-data\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.584114 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fmwr\" (UniqueName: \"kubernetes.io/projected/9f25435a-1493-4958-846e-4d0ccffc4ba0-kube-api-access-9fmwr\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: 
\"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.584200 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-config-data\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.584284 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.591204 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-config-data\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.591233 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.591873 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-db-sync-config-data\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.604703 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fmwr\" (UniqueName: \"kubernetes.io/projected/9f25435a-1493-4958-846e-4d0ccffc4ba0-kube-api-access-9fmwr\") pod \"watcher-kuttl-db-sync-fgqgx\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:43 crc kubenswrapper[4925]: I0121 11:36:43.608152 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:44 crc kubenswrapper[4925]: I0121 11:36:44.144131 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx"] Jan 21 11:36:44 crc kubenswrapper[4925]: W0121 11:36:44.164343 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9f25435a_1493_4958_846e_4d0ccffc4ba0.slice/crio-b659d8ce04cb0fb2edb29c517ac0516af0d86fb69dcf112291dd3aac98f6febf WatchSource:0}: Error finding container b659d8ce04cb0fb2edb29c517ac0516af0d86fb69dcf112291dd3aac98f6febf: Status 404 returned error can't find the container with id b659d8ce04cb0fb2edb29c517ac0516af0d86fb69dcf112291dd3aac98f6febf Jan 21 11:36:44 crc kubenswrapper[4925]: I0121 11:36:44.820957 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" event={"ID":"9f25435a-1493-4958-846e-4d0ccffc4ba0","Type":"ContainerStarted","Data":"b659d8ce04cb0fb2edb29c517ac0516af0d86fb69dcf112291dd3aac98f6febf"} Jan 21 11:36:45 crc kubenswrapper[4925]: I0121 11:36:45.925716 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" event={"ID":"9f25435a-1493-4958-846e-4d0ccffc4ba0","Type":"ContainerStarted","Data":"86260796942f1ba88f9c7d449270684752bfd79685c5d81f380dd2acb1eb3dd6"} Jan 21 11:36:45 crc kubenswrapper[4925]: I0121 11:36:45.954279 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" podStartSLOduration=2.954247854 podStartE2EDuration="2.954247854s" podCreationTimestamp="2026-01-21 11:36:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:36:45.947038286 +0000 UTC m=+2497.550930230" watchObservedRunningTime="2026-01-21 11:36:45.954247854 +0000 UTC m=+2497.558139788" Jan 21 11:36:46 crc kubenswrapper[4925]: I0121 11:36:46.557347 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:36:46 crc kubenswrapper[4925]: E0121 11:36:46.557642 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:36:48 crc kubenswrapper[4925]: I0121 11:36:48.957717 4925 generic.go:334] "Generic (PLEG): container finished" podID="9f25435a-1493-4958-846e-4d0ccffc4ba0" containerID="86260796942f1ba88f9c7d449270684752bfd79685c5d81f380dd2acb1eb3dd6" exitCode=0 Jan 21 11:36:48 crc kubenswrapper[4925]: I0121 11:36:48.957955 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" event={"ID":"9f25435a-1493-4958-846e-4d0ccffc4ba0","Type":"ContainerDied","Data":"86260796942f1ba88f9c7d449270684752bfd79685c5d81f380dd2acb1eb3dd6"} Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.304913 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.467870 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-combined-ca-bundle\") pod \"9f25435a-1493-4958-846e-4d0ccffc4ba0\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.467984 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fmwr\" (UniqueName: \"kubernetes.io/projected/9f25435a-1493-4958-846e-4d0ccffc4ba0-kube-api-access-9fmwr\") pod \"9f25435a-1493-4958-846e-4d0ccffc4ba0\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.468025 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-db-sync-config-data\") pod \"9f25435a-1493-4958-846e-4d0ccffc4ba0\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.468109 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-config-data\") pod \"9f25435a-1493-4958-846e-4d0ccffc4ba0\" (UID: \"9f25435a-1493-4958-846e-4d0ccffc4ba0\") " Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.486699 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f25435a-1493-4958-846e-4d0ccffc4ba0-kube-api-access-9fmwr" (OuterVolumeSpecName: "kube-api-access-9fmwr") pod "9f25435a-1493-4958-846e-4d0ccffc4ba0" (UID: "9f25435a-1493-4958-846e-4d0ccffc4ba0"). InnerVolumeSpecName "kube-api-access-9fmwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.488476 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "9f25435a-1493-4958-846e-4d0ccffc4ba0" (UID: "9f25435a-1493-4958-846e-4d0ccffc4ba0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.516242 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9f25435a-1493-4958-846e-4d0ccffc4ba0" (UID: "9f25435a-1493-4958-846e-4d0ccffc4ba0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.530074 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-config-data" (OuterVolumeSpecName: "config-data") pod "9f25435a-1493-4958-846e-4d0ccffc4ba0" (UID: "9f25435a-1493-4958-846e-4d0ccffc4ba0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.570344 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.570412 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.570425 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f25435a-1493-4958-846e-4d0ccffc4ba0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:50 crc kubenswrapper[4925]: I0121 11:36:50.570439 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fmwr\" (UniqueName: \"kubernetes.io/projected/9f25435a-1493-4958-846e-4d0ccffc4ba0-kube-api-access-9fmwr\") on node \"crc\" DevicePath \"\"" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.086339 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" event={"ID":"9f25435a-1493-4958-846e-4d0ccffc4ba0","Type":"ContainerDied","Data":"b659d8ce04cb0fb2edb29c517ac0516af0d86fb69dcf112291dd3aac98f6febf"} Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.086473 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b659d8ce04cb0fb2edb29c517ac0516af0d86fb69dcf112291dd3aac98f6febf" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.086492 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.246583 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:36:51 crc kubenswrapper[4925]: E0121 11:36:51.248366 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f25435a-1493-4958-846e-4d0ccffc4ba0" containerName="watcher-kuttl-db-sync" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.249638 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f25435a-1493-4958-846e-4d0ccffc4ba0" containerName="watcher-kuttl-db-sync" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.261227 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f25435a-1493-4958-846e-4d0ccffc4ba0" containerName="watcher-kuttl-db-sync" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.262272 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.283820 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.300541 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-zsgbh" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.301048 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.343095 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.344864 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.348742 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.362566 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.372382 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds2dr\" (UniqueName: \"kubernetes.io/projected/b80f39a0-48d4-43bd-9fe2-33a90c94e003-kube-api-access-ds2dr\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.372471 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b80f39a0-48d4-43bd-9fe2-33a90c94e003-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.372536 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.372575 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.372604 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.372631 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.424298 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.425524 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.429241 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.446431 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475013 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475087 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475145 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b305fc-2199-4df9-a84f-95a98def4162-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475286 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475322 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds2dr\" (UniqueName: \"kubernetes.io/projected/b80f39a0-48d4-43bd-9fe2-33a90c94e003-kube-api-access-ds2dr\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475350 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmtqc\" (UniqueName: \"kubernetes.io/projected/38b305fc-2199-4df9-a84f-95a98def4162-kube-api-access-zmtqc\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475408 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/b80f39a0-48d4-43bd-9fe2-33a90c94e003-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475457 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475521 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475556 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.475600 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.476437 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b80f39a0-48d4-43bd-9fe2-33a90c94e003-logs\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.480806 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.497843 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.505425 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds2dr\" (UniqueName: \"kubernetes.io/projected/b80f39a0-48d4-43bd-9fe2-33a90c94e003-kube-api-access-ds2dr\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.517446 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-combined-ca-bundle\") pod 
\"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.520222 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712239 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712590 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712663 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712716 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b305fc-2199-4df9-a84f-95a98def4162-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712772 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5906ef45-d0ed-40d7-b844-3ca70ed28c91-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712808 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712842 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712885 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712933 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pdqz\" (UniqueName: \"kubernetes.io/projected/5906ef45-d0ed-40d7-b844-3ca70ed28c91-kube-api-access-2pdqz\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712960 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmtqc\" (UniqueName: \"kubernetes.io/projected/38b305fc-2199-4df9-a84f-95a98def4162-kube-api-access-zmtqc\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712986 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.712388 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.717266 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b305fc-2199-4df9-a84f-95a98def4162-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.722648 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.723018 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.726175 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.739178 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmtqc\" (UniqueName: \"kubernetes.io/projected/38b305fc-2199-4df9-a84f-95a98def4162-kube-api-access-zmtqc\") pod \"watcher-kuttl-applier-0\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " 
pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.814724 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5906ef45-d0ed-40d7-b844-3ca70ed28c91-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.814804 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.814842 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.814901 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pdqz\" (UniqueName: \"kubernetes.io/projected/5906ef45-d0ed-40d7-b844-3ca70ed28c91-kube-api-access-2pdqz\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.814935 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.815038 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.815219 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5906ef45-d0ed-40d7-b844-3ca70ed28c91-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.822151 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.822943 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-combined-ca-bundle\") pod 
\"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.826109 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.826278 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.837209 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pdqz\" (UniqueName: \"kubernetes.io/projected/5906ef45-d0ed-40d7-b844-3ca70ed28c91-kube-api-access-2pdqz\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:51 crc kubenswrapper[4925]: I0121 11:36:51.977316 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:52 crc kubenswrapper[4925]: I0121 11:36:52.060888 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:36:52 crc kubenswrapper[4925]: I0121 11:36:52.274431 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:36:52 crc kubenswrapper[4925]: I0121 11:36:52.899620 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:36:52 crc kubenswrapper[4925]: W0121 11:36:52.913069 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5906ef45_d0ed_40d7_b844_3ca70ed28c91.slice/crio-f3e3ccf62eaa208570f8878e3ad3cce85d3c713b47fcdd3a024294b67abb576a WatchSource:0}: Error finding container f3e3ccf62eaa208570f8878e3ad3cce85d3c713b47fcdd3a024294b67abb576a: Status 404 returned error can't find the container with id f3e3ccf62eaa208570f8878e3ad3cce85d3c713b47fcdd3a024294b67abb576a Jan 21 11:36:53 crc kubenswrapper[4925]: I0121 11:36:53.000233 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:36:53 crc kubenswrapper[4925]: W0121 11:36:53.003736 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38b305fc_2199_4df9_a84f_95a98def4162.slice/crio-f11cfdb514bb80bf830b522f72f1d22464a071afca9f5ebb5ee0047499a15c94 WatchSource:0}: Error finding container f11cfdb514bb80bf830b522f72f1d22464a071afca9f5ebb5ee0047499a15c94: Status 404 returned error can't find the container with id f11cfdb514bb80bf830b522f72f1d22464a071afca9f5ebb5ee0047499a15c94 Jan 21 11:36:53 crc kubenswrapper[4925]: I0121 11:36:53.126111 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" 
event={"ID":"b80f39a0-48d4-43bd-9fe2-33a90c94e003","Type":"ContainerStarted","Data":"46e2312184e5af281dbdc3a41b4be581b3ef9286384aba24349da801e1d13909"} Jan 21 11:36:53 crc kubenswrapper[4925]: I0121 11:36:53.126180 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b80f39a0-48d4-43bd-9fe2-33a90c94e003","Type":"ContainerStarted","Data":"12cbdd363c52e7170fcc7f93c1bbd75844c7498c39f34a99920e8c3417dc74eb"} Jan 21 11:36:53 crc kubenswrapper[4925]: I0121 11:36:53.128864 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"5906ef45-d0ed-40d7-b844-3ca70ed28c91","Type":"ContainerStarted","Data":"f3e3ccf62eaa208570f8878e3ad3cce85d3c713b47fcdd3a024294b67abb576a"} Jan 21 11:36:53 crc kubenswrapper[4925]: I0121 11:36:53.132318 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"38b305fc-2199-4df9-a84f-95a98def4162","Type":"ContainerStarted","Data":"f11cfdb514bb80bf830b522f72f1d22464a071afca9f5ebb5ee0047499a15c94"} Jan 21 11:36:54 crc kubenswrapper[4925]: I0121 11:36:54.145902 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"5906ef45-d0ed-40d7-b844-3ca70ed28c91","Type":"ContainerStarted","Data":"d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873"} Jan 21 11:36:54 crc kubenswrapper[4925]: I0121 11:36:54.152808 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"38b305fc-2199-4df9-a84f-95a98def4162","Type":"ContainerStarted","Data":"e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e"} Jan 21 11:36:54 crc kubenswrapper[4925]: I0121 11:36:54.192658 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b80f39a0-48d4-43bd-9fe2-33a90c94e003","Type":"ContainerStarted","Data":"2beefae7b298264c0a529e8abf2efca7adb06cda1d8234c6890c457ae47fa351"} Jan 21 11:36:54 crc kubenswrapper[4925]: I0121 11:36:54.193983 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:54 crc kubenswrapper[4925]: I0121 11:36:54.365280 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=3.365228034 podStartE2EDuration="3.365228034s" podCreationTimestamp="2026-01-21 11:36:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:36:54.357989206 +0000 UTC m=+2505.961881150" watchObservedRunningTime="2026-01-21 11:36:54.365228034 +0000 UTC m=+2505.969119998" Jan 21 11:36:54 crc kubenswrapper[4925]: I0121 11:36:54.386733 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=3.386701812 podStartE2EDuration="3.386701812s" podCreationTimestamp="2026-01-21 11:36:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:36:54.383315365 +0000 UTC m=+2505.987207299" watchObservedRunningTime="2026-01-21 11:36:54.386701812 +0000 UTC m=+2505.990593746" Jan 21 11:36:54 crc kubenswrapper[4925]: I0121 11:36:54.432345 4925 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=3.432319539 podStartE2EDuration="3.432319539s" podCreationTimestamp="2026-01-21 11:36:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:36:54.424644038 +0000 UTC m=+2506.028535982" watchObservedRunningTime="2026-01-21 11:36:54.432319539 +0000 UTC m=+2506.036211473" Jan 21 11:36:56 crc kubenswrapper[4925]: I0121 11:36:56.838622 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:56 crc kubenswrapper[4925]: I0121 11:36:56.839045 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:36:56 crc kubenswrapper[4925]: I0121 11:36:56.978553 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:36:58 crc kubenswrapper[4925]: I0121 11:36:58.229997 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:36:58 crc kubenswrapper[4925]: I0121 11:36:58.503873 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:36:58 crc kubenswrapper[4925]: E0121 11:36:58.506108 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:37:01 crc kubenswrapper[4925]: I0121 11:37:01.545945 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:01 crc kubenswrapper[4925]: I0121 11:37:01.713007 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:01 crc kubenswrapper[4925]: I0121 11:37:01.721939 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:01 crc kubenswrapper[4925]: I0121 11:37:01.978437 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:02 crc kubenswrapper[4925]: I0121 11:37:02.011696 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:02 crc kubenswrapper[4925]: I0121 11:37:02.062137 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:02 crc kubenswrapper[4925]: I0121 11:37:02.091593 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:02 crc kubenswrapper[4925]: I0121 11:37:02.275125 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:02 crc kubenswrapper[4925]: I0121 11:37:02.282887 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:02 crc 
kubenswrapper[4925]: I0121 11:37:02.304146 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:02 crc kubenswrapper[4925]: I0121 11:37:02.304206 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:04 crc kubenswrapper[4925]: I0121 11:37:04.283847 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:04 crc kubenswrapper[4925]: I0121 11:37:04.284246 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="ceilometer-central-agent" containerID="cri-o://2fce1b41fadfa18a23dc358b1e9d9f5222ebac9f3f5075a02b80593064c37b9c" gracePeriod=30 Jan 21 11:37:04 crc kubenswrapper[4925]: I0121 11:37:04.284288 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="sg-core" containerID="cri-o://704c35b12a1178c80ba08580804e4fe2a313713d9fdf24e744bf974caa304547" gracePeriod=30 Jan 21 11:37:04 crc kubenswrapper[4925]: I0121 11:37:04.284378 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="ceilometer-notification-agent" containerID="cri-o://f4485638eafd70a7f2f4fa1a49b01bd8bc56d4ce4a64cc33ac9fc63b4ce997ad" gracePeriod=30 Jan 21 11:37:04 crc kubenswrapper[4925]: I0121 11:37:04.284339 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="proxy-httpd" containerID="cri-o://14abbcfe5eca6b94cd1fa1dab9428c7d148448bab3fb61ec349d9ca8155d032c" gracePeriod=30 Jan 21 11:37:05 crc kubenswrapper[4925]: I0121 11:37:05.308434 4925 generic.go:334] "Generic (PLEG): container finished" podID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerID="14abbcfe5eca6b94cd1fa1dab9428c7d148448bab3fb61ec349d9ca8155d032c" exitCode=0 Jan 21 11:37:05 crc kubenswrapper[4925]: I0121 11:37:05.308792 4925 generic.go:334] "Generic (PLEG): container finished" podID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerID="704c35b12a1178c80ba08580804e4fe2a313713d9fdf24e744bf974caa304547" exitCode=2 Jan 21 11:37:05 crc kubenswrapper[4925]: I0121 11:37:05.308805 4925 generic.go:334] "Generic (PLEG): container finished" podID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerID="2fce1b41fadfa18a23dc358b1e9d9f5222ebac9f3f5075a02b80593064c37b9c" exitCode=0 Jan 21 11:37:05 crc kubenswrapper[4925]: I0121 11:37:05.308526 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerDied","Data":"14abbcfe5eca6b94cd1fa1dab9428c7d148448bab3fb61ec349d9ca8155d032c"} Jan 21 11:37:05 crc kubenswrapper[4925]: I0121 11:37:05.308847 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerDied","Data":"704c35b12a1178c80ba08580804e4fe2a313713d9fdf24e744bf974caa304547"} Jan 21 11:37:05 crc kubenswrapper[4925]: I0121 11:37:05.308861 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" 
event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerDied","Data":"2fce1b41fadfa18a23dc358b1e9d9f5222ebac9f3f5075a02b80593064c37b9c"} Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.340148 4925 generic.go:334] "Generic (PLEG): container finished" podID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerID="f4485638eafd70a7f2f4fa1a49b01bd8bc56d4ce4a64cc33ac9fc63b4ce997ad" exitCode=0 Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.340618 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerDied","Data":"f4485638eafd70a7f2f4fa1a49b01bd8bc56d4ce4a64cc33ac9fc63b4ce997ad"} Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.500546 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.594953 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-run-httpd\") pod \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.595044 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-log-httpd\") pod \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.595073 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-ceilometer-tls-certs\") pod \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.595211 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-sg-core-conf-yaml\") pod \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.595244 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmr46\" (UniqueName: \"kubernetes.io/projected/f5a286aa-0a25-4bfc-b5b6-c38d70648300-kube-api-access-rmr46\") pod \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.595278 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-combined-ca-bundle\") pod \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.595312 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-scripts\") pod \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.595357 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-config-data\") pod \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\" (UID: \"f5a286aa-0a25-4bfc-b5b6-c38d70648300\") " Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.599054 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f5a286aa-0a25-4bfc-b5b6-c38d70648300" (UID: "f5a286aa-0a25-4bfc-b5b6-c38d70648300"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.599599 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f5a286aa-0a25-4bfc-b5b6-c38d70648300" (UID: "f5a286aa-0a25-4bfc-b5b6-c38d70648300"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.602662 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5a286aa-0a25-4bfc-b5b6-c38d70648300-kube-api-access-rmr46" (OuterVolumeSpecName: "kube-api-access-rmr46") pod "f5a286aa-0a25-4bfc-b5b6-c38d70648300" (UID: "f5a286aa-0a25-4bfc-b5b6-c38d70648300"). InnerVolumeSpecName "kube-api-access-rmr46". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.603686 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-scripts" (OuterVolumeSpecName: "scripts") pod "f5a286aa-0a25-4bfc-b5b6-c38d70648300" (UID: "f5a286aa-0a25-4bfc-b5b6-c38d70648300"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.655270 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f5a286aa-0a25-4bfc-b5b6-c38d70648300" (UID: "f5a286aa-0a25-4bfc-b5b6-c38d70648300"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.656995 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f5a286aa-0a25-4bfc-b5b6-c38d70648300" (UID: "f5a286aa-0a25-4bfc-b5b6-c38d70648300"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.681979 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5a286aa-0a25-4bfc-b5b6-c38d70648300" (UID: "f5a286aa-0a25-4bfc-b5b6-c38d70648300"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.699814 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.699847 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a286aa-0a25-4bfc-b5b6-c38d70648300-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.699861 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.699874 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.699885 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmr46\" (UniqueName: \"kubernetes.io/projected/f5a286aa-0a25-4bfc-b5b6-c38d70648300-kube-api-access-rmr46\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.699897 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.699907 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.718881 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-config-data" (OuterVolumeSpecName: "config-data") pod "f5a286aa-0a25-4bfc-b5b6-c38d70648300" (UID: "f5a286aa-0a25-4bfc-b5b6-c38d70648300"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:06 crc kubenswrapper[4925]: I0121 11:37:06.801905 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a286aa-0a25-4bfc-b5b6-c38d70648300-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.385067 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"f5a286aa-0a25-4bfc-b5b6-c38d70648300","Type":"ContainerDied","Data":"e3ec47fd35cff81315868557d82392ac25d562fa00104c830d97df4ffc2e9713"} Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.385156 4925 scope.go:117] "RemoveContainer" containerID="14abbcfe5eca6b94cd1fa1dab9428c7d148448bab3fb61ec349d9ca8155d032c" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.385179 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.415240 4925 scope.go:117] "RemoveContainer" containerID="704c35b12a1178c80ba08580804e4fe2a313713d9fdf24e744bf974caa304547" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.438907 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.447354 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.462861 4925 scope.go:117] "RemoveContainer" containerID="f4485638eafd70a7f2f4fa1a49b01bd8bc56d4ce4a64cc33ac9fc63b4ce997ad" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.483309 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:07 crc kubenswrapper[4925]: E0121 11:37:07.483762 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="proxy-httpd" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.483785 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="proxy-httpd" Jan 21 11:37:07 crc kubenswrapper[4925]: E0121 11:37:07.483798 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="sg-core" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.483804 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="sg-core" Jan 21 11:37:07 crc kubenswrapper[4925]: E0121 11:37:07.483825 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="ceilometer-notification-agent" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.483833 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="ceilometer-notification-agent" Jan 21 11:37:07 crc kubenswrapper[4925]: E0121 11:37:07.483845 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="ceilometer-central-agent" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.483850 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="ceilometer-central-agent" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.484012 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="proxy-httpd" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.484027 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="ceilometer-notification-agent" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.484038 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="ceilometer-central-agent" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.484051 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" containerName="sg-core" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.486236 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.494044 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.494344 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.494630 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.518024 4925 scope.go:117] "RemoveContainer" containerID="2fce1b41fadfa18a23dc358b1e9d9f5222ebac9f3f5075a02b80593064c37b9c" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.540022 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5a286aa-0a25-4bfc-b5b6-c38d70648300" path="/var/lib/kubelet/pods/f5a286aa-0a25-4bfc-b5b6-c38d70648300/volumes" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.540941 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:07 crc kubenswrapper[4925]: E0121 11:37:07.564120 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5a286aa_0a25_4bfc_b5b6_c38d70648300.slice\": RecentStats: unable to find data in memory cache]" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.619354 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-config-data\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.620111 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-scripts\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.620516 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckrcl\" (UniqueName: \"kubernetes.io/projected/3dd0bf47-7633-4758-abc8-631f03f1734b-kube-api-access-ckrcl\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.620660 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.620921 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-run-httpd\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.621106 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-log-httpd\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.621241 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.621451 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.723140 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.723279 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.723417 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-config-data\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.723445 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-scripts\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.723527 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckrcl\" (UniqueName: \"kubernetes.io/projected/3dd0bf47-7633-4758-abc8-631f03f1734b-kube-api-access-ckrcl\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.723609 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.723631 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-run-httpd\") pod 
\"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.723724 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-log-httpd\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.724506 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-log-httpd\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.724791 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-run-httpd\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.729453 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.729491 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.729576 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-scripts\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.737016 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-config-data\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.743126 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.744148 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckrcl\" (UniqueName: \"kubernetes.io/projected/3dd0bf47-7633-4758-abc8-631f03f1734b-kube-api-access-ckrcl\") pod \"ceilometer-0\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:07 crc kubenswrapper[4925]: I0121 11:37:07.839027 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:08 crc kubenswrapper[4925]: I0121 11:37:08.347887 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:08 crc kubenswrapper[4925]: W0121 11:37:08.406671 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3dd0bf47_7633_4758_abc8_631f03f1734b.slice/crio-1d3c57c42a2f1142af9fb593bbe0562cfa9540252cb14d021534034748c51bf0 WatchSource:0}: Error finding container 1d3c57c42a2f1142af9fb593bbe0562cfa9540252cb14d021534034748c51bf0: Status 404 returned error can't find the container with id 1d3c57c42a2f1142af9fb593bbe0562cfa9540252cb14d021534034748c51bf0 Jan 21 11:37:08 crc kubenswrapper[4925]: I0121 11:37:08.409965 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 11:37:09 crc kubenswrapper[4925]: I0121 11:37:09.405159 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerStarted","Data":"1d3c57c42a2f1142af9fb593bbe0562cfa9540252cb14d021534034748c51bf0"} Jan 21 11:37:09 crc kubenswrapper[4925]: I0121 11:37:09.509827 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:37:09 crc kubenswrapper[4925]: E0121 11:37:09.510294 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:37:10 crc kubenswrapper[4925]: I0121 11:37:10.416028 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerStarted","Data":"ee604e9cd650fda4ebeea6f85926b10a4a2d9630607a1f4c88b9dfc812b2b7de"} Jan 21 11:37:11 crc kubenswrapper[4925]: I0121 11:37:11.426443 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerStarted","Data":"d99b1845f32193eebe1c606f24ded709182f69c0c12f7a2ac1cdb03ad54d8ef1"} Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.453919 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerStarted","Data":"e64b83cf69c51a9452ab4ee1bfde67ee3d9f09f0707fbeea2fba7eba57d21e29"} Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.479407 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx"] Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.489388 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-fgqgx"] Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.513970 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f25435a-1493-4958-846e-4d0ccffc4ba0" path="/var/lib/kubelet/pods/9f25435a-1493-4958-846e-4d0ccffc4ba0/volumes" Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.520158 4925 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["watcher-kuttl-default/watcher6761-account-delete-qt5hz"] Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.521617 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.541146 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher6761-account-delete-qt5hz"] Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.557553 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.557897 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="5906ef45-d0ed-40d7-b844-3ca70ed28c91" containerName="watcher-decision-engine" containerID="cri-o://d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873" gracePeriod=30 Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.630476 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a93b7eef-8870-4f5a-9b2e-509c4953c740-operator-scripts\") pod \"watcher6761-account-delete-qt5hz\" (UID: \"a93b7eef-8870-4f5a-9b2e-509c4953c740\") " pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.631917 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gs5rn\" (UniqueName: \"kubernetes.io/projected/a93b7eef-8870-4f5a-9b2e-509c4953c740-kube-api-access-gs5rn\") pod \"watcher6761-account-delete-qt5hz\" (UID: \"a93b7eef-8870-4f5a-9b2e-509c4953c740\") " pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.652193 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.652552 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerName="watcher-kuttl-api-log" containerID="cri-o://46e2312184e5af281dbdc3a41b4be581b3ef9286384aba24349da801e1d13909" gracePeriod=30 Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.653178 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerName="watcher-api" containerID="cri-o://2beefae7b298264c0a529e8abf2efca7adb06cda1d8234c6890c457ae47fa351" gracePeriod=30 Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.710649 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.710942 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="38b305fc-2199-4df9-a84f-95a98def4162" containerName="watcher-applier" containerID="cri-o://e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e" gracePeriod=30 Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.737228 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gs5rn\" (UniqueName: 
\"kubernetes.io/projected/a93b7eef-8870-4f5a-9b2e-509c4953c740-kube-api-access-gs5rn\") pod \"watcher6761-account-delete-qt5hz\" (UID: \"a93b7eef-8870-4f5a-9b2e-509c4953c740\") " pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.737358 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a93b7eef-8870-4f5a-9b2e-509c4953c740-operator-scripts\") pod \"watcher6761-account-delete-qt5hz\" (UID: \"a93b7eef-8870-4f5a-9b2e-509c4953c740\") " pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.738526 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a93b7eef-8870-4f5a-9b2e-509c4953c740-operator-scripts\") pod \"watcher6761-account-delete-qt5hz\" (UID: \"a93b7eef-8870-4f5a-9b2e-509c4953c740\") " pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.778786 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gs5rn\" (UniqueName: \"kubernetes.io/projected/a93b7eef-8870-4f5a-9b2e-509c4953c740-kube-api-access-gs5rn\") pod \"watcher6761-account-delete-qt5hz\" (UID: \"a93b7eef-8870-4f5a-9b2e-509c4953c740\") " pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:13 crc kubenswrapper[4925]: I0121 11:37:13.844987 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:14 crc kubenswrapper[4925]: I0121 11:37:14.439196 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher6761-account-delete-qt5hz"] Jan 21 11:37:14 crc kubenswrapper[4925]: I0121 11:37:14.496506 4925 generic.go:334] "Generic (PLEG): container finished" podID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerID="46e2312184e5af281dbdc3a41b4be581b3ef9286384aba24349da801e1d13909" exitCode=143 Jan 21 11:37:14 crc kubenswrapper[4925]: I0121 11:37:14.496574 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b80f39a0-48d4-43bd-9fe2-33a90c94e003","Type":"ContainerDied","Data":"46e2312184e5af281dbdc3a41b4be581b3ef9286384aba24349da801e1d13909"} Jan 21 11:37:15 crc kubenswrapper[4925]: I0121 11:37:15.512673 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" event={"ID":"a93b7eef-8870-4f5a-9b2e-509c4953c740","Type":"ContainerStarted","Data":"d99c3d55a9cfbb6dbbcc9813de8123a4bf40b2d6e46f6b7f1701cbb263ec9ffd"} Jan 21 11:37:15 crc kubenswrapper[4925]: I0121 11:37:15.513308 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" event={"ID":"a93b7eef-8870-4f5a-9b2e-509c4953c740","Type":"ContainerStarted","Data":"f353b123c4f696a51f55739453684087971b52f9252d2454bf9acd94718bbdbc"} Jan 21 11:37:15 crc kubenswrapper[4925]: I0121 11:37:15.519195 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerStarted","Data":"84566f1e802e63d0935b2cd17b5f97855a4986b2a02cf2cd62023fcaed760b0b"} Jan 21 11:37:15 crc kubenswrapper[4925]: I0121 11:37:15.525406 4925 generic.go:334] "Generic (PLEG): container finished" 
podID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerID="2beefae7b298264c0a529e8abf2efca7adb06cda1d8234c6890c457ae47fa351" exitCode=0 Jan 21 11:37:15 crc kubenswrapper[4925]: I0121 11:37:15.525736 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b80f39a0-48d4-43bd-9fe2-33a90c94e003","Type":"ContainerDied","Data":"2beefae7b298264c0a529e8abf2efca7adb06cda1d8234c6890c457ae47fa351"} Jan 21 11:37:15 crc kubenswrapper[4925]: I0121 11:37:15.579354 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.510606213 podStartE2EDuration="8.579330312s" podCreationTimestamp="2026-01-21 11:37:07 +0000 UTC" firstStartedPulling="2026-01-21 11:37:08.409723311 +0000 UTC m=+2520.013615245" lastFinishedPulling="2026-01-21 11:37:14.47844741 +0000 UTC m=+2526.082339344" observedRunningTime="2026-01-21 11:37:15.57740888 +0000 UTC m=+2527.181300834" watchObservedRunningTime="2026-01-21 11:37:15.579330312 +0000 UTC m=+2527.183222246" Jan 21 11:37:15 crc kubenswrapper[4925]: I0121 11:37:15.587497 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" podStartSLOduration=2.587470618 podStartE2EDuration="2.587470618s" podCreationTimestamp="2026-01-21 11:37:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:37:15.5367927 +0000 UTC m=+2527.140684644" watchObservedRunningTime="2026-01-21 11:37:15.587470618 +0000 UTC m=+2527.191362552" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.481532 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.551083 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"b80f39a0-48d4-43bd-9fe2-33a90c94e003","Type":"ContainerDied","Data":"12cbdd363c52e7170fcc7f93c1bbd75844c7498c39f34a99920e8c3417dc74eb"} Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.551481 4925 scope.go:117] "RemoveContainer" containerID="2beefae7b298264c0a529e8abf2efca7adb06cda1d8234c6890c457ae47fa351" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.551695 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.558353 4925 generic.go:334] "Generic (PLEG): container finished" podID="a93b7eef-8870-4f5a-9b2e-509c4953c740" containerID="d99c3d55a9cfbb6dbbcc9813de8123a4bf40b2d6e46f6b7f1701cbb263ec9ffd" exitCode=0 Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.562039 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" event={"ID":"a93b7eef-8870-4f5a-9b2e-509c4953c740","Type":"ContainerDied","Data":"d99c3d55a9cfbb6dbbcc9813de8123a4bf40b2d6e46f6b7f1701cbb263ec9ffd"} Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.562251 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.601645 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-cert-memcached-mtls\") pod \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.605889 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds2dr\" (UniqueName: \"kubernetes.io/projected/b80f39a0-48d4-43bd-9fe2-33a90c94e003-kube-api-access-ds2dr\") pod \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.606189 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b80f39a0-48d4-43bd-9fe2-33a90c94e003-logs\") pod \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.606327 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-combined-ca-bundle\") pod \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.606453 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-config-data\") pod \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.606653 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-custom-prometheus-ca\") pod \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\" (UID: \"b80f39a0-48d4-43bd-9fe2-33a90c94e003\") " Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.610038 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b80f39a0-48d4-43bd-9fe2-33a90c94e003-logs" (OuterVolumeSpecName: "logs") pod "b80f39a0-48d4-43bd-9fe2-33a90c94e003" (UID: "b80f39a0-48d4-43bd-9fe2-33a90c94e003"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.614642 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b80f39a0-48d4-43bd-9fe2-33a90c94e003-kube-api-access-ds2dr" (OuterVolumeSpecName: "kube-api-access-ds2dr") pod "b80f39a0-48d4-43bd-9fe2-33a90c94e003" (UID: "b80f39a0-48d4-43bd-9fe2-33a90c94e003"). InnerVolumeSpecName "kube-api-access-ds2dr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.615663 4925 scope.go:117] "RemoveContainer" containerID="46e2312184e5af281dbdc3a41b4be581b3ef9286384aba24349da801e1d13909" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.653177 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "b80f39a0-48d4-43bd-9fe2-33a90c94e003" (UID: "b80f39a0-48d4-43bd-9fe2-33a90c94e003"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.653780 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b80f39a0-48d4-43bd-9fe2-33a90c94e003" (UID: "b80f39a0-48d4-43bd-9fe2-33a90c94e003"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.668678 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-config-data" (OuterVolumeSpecName: "config-data") pod "b80f39a0-48d4-43bd-9fe2-33a90c94e003" (UID: "b80f39a0-48d4-43bd-9fe2-33a90c94e003"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.689292 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "b80f39a0-48d4-43bd-9fe2-33a90c94e003" (UID: "b80f39a0-48d4-43bd-9fe2-33a90c94e003"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.709107 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.709148 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds2dr\" (UniqueName: \"kubernetes.io/projected/b80f39a0-48d4-43bd-9fe2-33a90c94e003-kube-api-access-ds2dr\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.709158 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b80f39a0-48d4-43bd-9fe2-33a90c94e003-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.709167 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.709176 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.709186 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b80f39a0-48d4-43bd-9fe2-33a90c94e003-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.893206 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:37:16 crc kubenswrapper[4925]: I0121 11:37:16.904220 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:37:16 crc kubenswrapper[4925]: E0121 11:37:16.979439 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e is running failed: container process not found" containerID="e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:37:16 crc kubenswrapper[4925]: E0121 11:37:16.980052 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e is running failed: container process not found" containerID="e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:37:16 crc kubenswrapper[4925]: E0121 11:37:16.980477 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e is running failed: container process not found" containerID="e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:37:16 crc kubenswrapper[4925]: E0121 11:37:16.980531 4925 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or 
running: checking if PID of e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e is running failed: container process not found" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="38b305fc-2199-4df9-a84f-95a98def4162" containerName="watcher-applier" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.517643 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" path="/var/lib/kubelet/pods/b80f39a0-48d4-43bd-9fe2-33a90c94e003/volumes" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.571164 4925 generic.go:334] "Generic (PLEG): container finished" podID="38b305fc-2199-4df9-a84f-95a98def4162" containerID="e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e" exitCode=0 Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.572233 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"38b305fc-2199-4df9-a84f-95a98def4162","Type":"ContainerDied","Data":"e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e"} Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.572368 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"38b305fc-2199-4df9-a84f-95a98def4162","Type":"ContainerDied","Data":"f11cfdb514bb80bf830b522f72f1d22464a071afca9f5ebb5ee0047499a15c94"} Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.572470 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f11cfdb514bb80bf830b522f72f1d22464a071afca9f5ebb5ee0047499a15c94" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.573494 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.610390 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.757261 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b305fc-2199-4df9-a84f-95a98def4162-logs\") pod \"38b305fc-2199-4df9-a84f-95a98def4162\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.757426 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-cert-memcached-mtls\") pod \"38b305fc-2199-4df9-a84f-95a98def4162\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.757479 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-config-data\") pod \"38b305fc-2199-4df9-a84f-95a98def4162\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.757526 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-combined-ca-bundle\") pod \"38b305fc-2199-4df9-a84f-95a98def4162\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.757616 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmtqc\" (UniqueName: \"kubernetes.io/projected/38b305fc-2199-4df9-a84f-95a98def4162-kube-api-access-zmtqc\") pod \"38b305fc-2199-4df9-a84f-95a98def4162\" (UID: \"38b305fc-2199-4df9-a84f-95a98def4162\") " Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.759466 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38b305fc-2199-4df9-a84f-95a98def4162-logs" (OuterVolumeSpecName: "logs") pod "38b305fc-2199-4df9-a84f-95a98def4162" (UID: "38b305fc-2199-4df9-a84f-95a98def4162"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.789472 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38b305fc-2199-4df9-a84f-95a98def4162-kube-api-access-zmtqc" (OuterVolumeSpecName: "kube-api-access-zmtqc") pod "38b305fc-2199-4df9-a84f-95a98def4162" (UID: "38b305fc-2199-4df9-a84f-95a98def4162"). InnerVolumeSpecName "kube-api-access-zmtqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.800050 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38b305fc-2199-4df9-a84f-95a98def4162" (UID: "38b305fc-2199-4df9-a84f-95a98def4162"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.840251 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-config-data" (OuterVolumeSpecName: "config-data") pod "38b305fc-2199-4df9-a84f-95a98def4162" (UID: "38b305fc-2199-4df9-a84f-95a98def4162"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.858882 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38b305fc-2199-4df9-a84f-95a98def4162-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.858948 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.858963 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:17 crc kubenswrapper[4925]: I0121 11:37:17.858982 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmtqc\" (UniqueName: \"kubernetes.io/projected/38b305fc-2199-4df9-a84f-95a98def4162-kube-api-access-zmtqc\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.042806 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "38b305fc-2199-4df9-a84f-95a98def4162" (UID: "38b305fc-2199-4df9-a84f-95a98def4162"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.113190 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/38b305fc-2199-4df9-a84f-95a98def4162-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.176909 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.317101 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gs5rn\" (UniqueName: \"kubernetes.io/projected/a93b7eef-8870-4f5a-9b2e-509c4953c740-kube-api-access-gs5rn\") pod \"a93b7eef-8870-4f5a-9b2e-509c4953c740\" (UID: \"a93b7eef-8870-4f5a-9b2e-509c4953c740\") " Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.317277 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a93b7eef-8870-4f5a-9b2e-509c4953c740-operator-scripts\") pod \"a93b7eef-8870-4f5a-9b2e-509c4953c740\" (UID: \"a93b7eef-8870-4f5a-9b2e-509c4953c740\") " Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.317768 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93b7eef-8870-4f5a-9b2e-509c4953c740-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a93b7eef-8870-4f5a-9b2e-509c4953c740" (UID: "a93b7eef-8870-4f5a-9b2e-509c4953c740"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.320728 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a93b7eef-8870-4f5a-9b2e-509c4953c740-kube-api-access-gs5rn" (OuterVolumeSpecName: "kube-api-access-gs5rn") pod "a93b7eef-8870-4f5a-9b2e-509c4953c740" (UID: "a93b7eef-8870-4f5a-9b2e-509c4953c740"). InnerVolumeSpecName "kube-api-access-gs5rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.419897 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gs5rn\" (UniqueName: \"kubernetes.io/projected/a93b7eef-8870-4f5a-9b2e-509c4953c740-kube-api-access-gs5rn\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.419960 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a93b7eef-8870-4f5a-9b2e-509c4953c740-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.585738 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.585737 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher6761-account-delete-qt5hz" event={"ID":"a93b7eef-8870-4f5a-9b2e-509c4953c740","Type":"ContainerDied","Data":"f353b123c4f696a51f55739453684087971b52f9252d2454bf9acd94718bbdbc"} Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.586673 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f353b123c4f696a51f55739453684087971b52f9252d2454bf9acd94718bbdbc" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.586043 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="ceilometer-central-agent" containerID="cri-o://ee604e9cd650fda4ebeea6f85926b10a4a2d9630607a1f4c88b9dfc812b2b7de" gracePeriod=30 Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.585827 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.586073 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="sg-core" containerID="cri-o://e64b83cf69c51a9452ab4ee1bfde67ee3d9f09f0707fbeea2fba7eba57d21e29" gracePeriod=30 Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.586124 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="proxy-httpd" containerID="cri-o://84566f1e802e63d0935b2cd17b5f97855a4986b2a02cf2cd62023fcaed760b0b" gracePeriod=30 Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.586090 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="ceilometer-notification-agent" containerID="cri-o://d99b1845f32193eebe1c606f24ded709182f69c0c12f7a2ac1cdb03ad54d8ef1" gracePeriod=30 Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.662428 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:37:18 crc kubenswrapper[4925]: I0121 11:37:18.667980 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.513899 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38b305fc-2199-4df9-a84f-95a98def4162" path="/var/lib/kubelet/pods/38b305fc-2199-4df9-a84f-95a98def4162/volumes" Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.599973 4925 generic.go:334] "Generic (PLEG): container finished" podID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerID="84566f1e802e63d0935b2cd17b5f97855a4986b2a02cf2cd62023fcaed760b0b" exitCode=0 Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.600012 4925 generic.go:334] "Generic (PLEG): container finished" podID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerID="e64b83cf69c51a9452ab4ee1bfde67ee3d9f09f0707fbeea2fba7eba57d21e29" exitCode=2 Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.600023 4925 generic.go:334] "Generic (PLEG): container finished" podID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerID="d99b1845f32193eebe1c606f24ded709182f69c0c12f7a2ac1cdb03ad54d8ef1" exitCode=0 Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.600032 4925 generic.go:334] "Generic (PLEG): container finished" podID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerID="ee604e9cd650fda4ebeea6f85926b10a4a2d9630607a1f4c88b9dfc812b2b7de" exitCode=0 Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.600060 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerDied","Data":"84566f1e802e63d0935b2cd17b5f97855a4986b2a02cf2cd62023fcaed760b0b"} Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.600109 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerDied","Data":"e64b83cf69c51a9452ab4ee1bfde67ee3d9f09f0707fbeea2fba7eba57d21e29"} Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.600125 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" 
event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerDied","Data":"d99b1845f32193eebe1c606f24ded709182f69c0c12f7a2ac1cdb03ad54d8ef1"} Jan 21 11:37:19 crc kubenswrapper[4925]: I0121 11:37:19.600139 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerDied","Data":"ee604e9cd650fda4ebeea6f85926b10a4a2d9630607a1f4c88b9dfc812b2b7de"} Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.232154 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.370622 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-sg-core-conf-yaml\") pod \"3dd0bf47-7633-4758-abc8-631f03f1734b\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.370703 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckrcl\" (UniqueName: \"kubernetes.io/projected/3dd0bf47-7633-4758-abc8-631f03f1734b-kube-api-access-ckrcl\") pod \"3dd0bf47-7633-4758-abc8-631f03f1734b\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.370767 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-log-httpd\") pod \"3dd0bf47-7633-4758-abc8-631f03f1734b\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.370834 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-run-httpd\") pod \"3dd0bf47-7633-4758-abc8-631f03f1734b\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.370908 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-scripts\") pod \"3dd0bf47-7633-4758-abc8-631f03f1734b\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.370936 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-config-data\") pod \"3dd0bf47-7633-4758-abc8-631f03f1734b\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.370966 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-ceilometer-tls-certs\") pod \"3dd0bf47-7633-4758-abc8-631f03f1734b\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.371032 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-combined-ca-bundle\") pod \"3dd0bf47-7633-4758-abc8-631f03f1734b\" (UID: \"3dd0bf47-7633-4758-abc8-631f03f1734b\") " Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.372505 4925 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3dd0bf47-7633-4758-abc8-631f03f1734b" (UID: "3dd0bf47-7633-4758-abc8-631f03f1734b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.379728 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-scripts" (OuterVolumeSpecName: "scripts") pod "3dd0bf47-7633-4758-abc8-631f03f1734b" (UID: "3dd0bf47-7633-4758-abc8-631f03f1734b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.382206 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3dd0bf47-7633-4758-abc8-631f03f1734b" (UID: "3dd0bf47-7633-4758-abc8-631f03f1734b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.382948 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dd0bf47-7633-4758-abc8-631f03f1734b-kube-api-access-ckrcl" (OuterVolumeSpecName: "kube-api-access-ckrcl") pod "3dd0bf47-7633-4758-abc8-631f03f1734b" (UID: "3dd0bf47-7633-4758-abc8-631f03f1734b"). InnerVolumeSpecName "kube-api-access-ckrcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.403026 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3dd0bf47-7633-4758-abc8-631f03f1734b" (UID: "3dd0bf47-7633-4758-abc8-631f03f1734b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.471072 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3dd0bf47-7633-4758-abc8-631f03f1734b" (UID: "3dd0bf47-7633-4758-abc8-631f03f1734b"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.473323 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.473350 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.473363 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckrcl\" (UniqueName: \"kubernetes.io/projected/3dd0bf47-7633-4758-abc8-631f03f1734b-kube-api-access-ckrcl\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.473378 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.473387 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3dd0bf47-7633-4758-abc8-631f03f1734b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.473413 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.507587 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3dd0bf47-7633-4758-abc8-631f03f1734b" (UID: "3dd0bf47-7633-4758-abc8-631f03f1734b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.524644 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-config-data" (OuterVolumeSpecName: "config-data") pod "3dd0bf47-7633-4758-abc8-631f03f1734b" (UID: "3dd0bf47-7633-4758-abc8-631f03f1734b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.575102 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.575145 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dd0bf47-7633-4758-abc8-631f03f1734b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.616946 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"3dd0bf47-7633-4758-abc8-631f03f1734b","Type":"ContainerDied","Data":"1d3c57c42a2f1142af9fb593bbe0562cfa9540252cb14d021534034748c51bf0"} Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.617024 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.617024 4925 scope.go:117] "RemoveContainer" containerID="84566f1e802e63d0935b2cd17b5f97855a4986b2a02cf2cd62023fcaed760b0b" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.651218 4925 scope.go:117] "RemoveContainer" containerID="e64b83cf69c51a9452ab4ee1bfde67ee3d9f09f0707fbeea2fba7eba57d21e29" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.661038 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.669534 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686183 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:20 crc kubenswrapper[4925]: E0121 11:37:20.686598 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerName="watcher-api" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686620 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerName="watcher-api" Jan 21 11:37:20 crc kubenswrapper[4925]: E0121 11:37:20.686640 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38b305fc-2199-4df9-a84f-95a98def4162" containerName="watcher-applier" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686649 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="38b305fc-2199-4df9-a84f-95a98def4162" containerName="watcher-applier" Jan 21 11:37:20 crc kubenswrapper[4925]: E0121 11:37:20.686668 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="sg-core" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686674 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="sg-core" Jan 21 11:37:20 crc kubenswrapper[4925]: E0121 11:37:20.686689 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="ceilometer-central-agent" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686696 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="ceilometer-central-agent" Jan 21 11:37:20 crc kubenswrapper[4925]: E0121 11:37:20.686706 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="ceilometer-notification-agent" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686713 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="ceilometer-notification-agent" Jan 21 11:37:20 crc kubenswrapper[4925]: E0121 11:37:20.686724 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="proxy-httpd" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686733 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="proxy-httpd" Jan 21 11:37:20 crc kubenswrapper[4925]: E0121 11:37:20.686742 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a93b7eef-8870-4f5a-9b2e-509c4953c740" containerName="mariadb-account-delete" Jan 21 11:37:20 crc kubenswrapper[4925]: 
I0121 11:37:20.686750 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="a93b7eef-8870-4f5a-9b2e-509c4953c740" containerName="mariadb-account-delete" Jan 21 11:37:20 crc kubenswrapper[4925]: E0121 11:37:20.686768 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerName="watcher-kuttl-api-log" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686776 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerName="watcher-kuttl-api-log" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686936 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="38b305fc-2199-4df9-a84f-95a98def4162" containerName="watcher-applier" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686953 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="ceilometer-central-agent" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686960 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="sg-core" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686973 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="ceilometer-notification-agent" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686983 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" containerName="proxy-httpd" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.686997 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerName="watcher-kuttl-api-log" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.687009 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="a93b7eef-8870-4f5a-9b2e-509c4953c740" containerName="mariadb-account-delete" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.687018 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b80f39a0-48d4-43bd-9fe2-33a90c94e003" containerName="watcher-api" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.688660 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.690729 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.690949 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.691525 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.709956 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.715630 4925 scope.go:117] "RemoveContainer" containerID="d99b1845f32193eebe1c606f24ded709182f69c0c12f7a2ac1cdb03ad54d8ef1" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.768667 4925 scope.go:117] "RemoveContainer" containerID="ee604e9cd650fda4ebeea6f85926b10a4a2d9630607a1f4c88b9dfc812b2b7de" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.778243 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.778304 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bxqf\" (UniqueName: \"kubernetes.io/projected/b5473307-4311-4aaf-858e-74f892ec789b-kube-api-access-5bxqf\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.778361 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-scripts\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.778570 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.778597 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-config-data\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.778640 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-log-httpd\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.778668 4925 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-run-httpd\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.778687 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.896701 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.896796 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-config-data\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.896895 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-log-httpd\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.896971 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-run-httpd\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.897074 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.897148 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.897190 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bxqf\" (UniqueName: \"kubernetes.io/projected/b5473307-4311-4aaf-858e-74f892ec789b-kube-api-access-5bxqf\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.897218 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-scripts\") pod \"ceilometer-0\" (UID: 
\"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.901302 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-log-httpd\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.901215 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-run-httpd\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.902707 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.928022 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.932242 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-scripts\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.935452 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.941864 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bxqf\" (UniqueName: \"kubernetes.io/projected/b5473307-4311-4aaf-858e-74f892ec789b-kube-api-access-5bxqf\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:20 crc kubenswrapper[4925]: I0121 11:37:20.943018 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-config-data\") pod \"ceilometer-0\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:21 crc kubenswrapper[4925]: I0121 11:37:21.009724 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:21 crc kubenswrapper[4925]: I0121 11:37:21.519199 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dd0bf47-7633-4758-abc8-631f03f1734b" path="/var/lib/kubelet/pods/3dd0bf47-7633-4758-abc8-631f03f1734b/volumes" Jan 21 11:37:21 crc kubenswrapper[4925]: I0121 11:37:21.666053 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:22 crc kubenswrapper[4925]: E0121 11:37:22.063345 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873 is running failed: container process not found" containerID="d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Jan 21 11:37:22 crc kubenswrapper[4925]: E0121 11:37:22.064644 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873 is running failed: container process not found" containerID="d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Jan 21 11:37:22 crc kubenswrapper[4925]: E0121 11:37:22.065093 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873 is running failed: container process not found" containerID="d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Jan 21 11:37:22 crc kubenswrapper[4925]: E0121 11:37:22.065263 4925 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873 is running failed: container process not found" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="5906ef45-d0ed-40d7-b844-3ca70ed28c91" containerName="watcher-decision-engine" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.503173 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:37:22 crc kubenswrapper[4925]: E0121 11:37:22.503486 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.704762 4925 generic.go:334] "Generic (PLEG): container finished" podID="5906ef45-d0ed-40d7-b844-3ca70ed28c91" containerID="d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873" exitCode=0 Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.704895 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" 
event={"ID":"5906ef45-d0ed-40d7-b844-3ca70ed28c91","Type":"ContainerDied","Data":"d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873"} Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.704946 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"5906ef45-d0ed-40d7-b844-3ca70ed28c91","Type":"ContainerDied","Data":"f3e3ccf62eaa208570f8878e3ad3cce85d3c713b47fcdd3a024294b67abb576a"} Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.704959 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3e3ccf62eaa208570f8878e3ad3cce85d3c713b47fcdd3a024294b67abb576a" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.766180 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.766219 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerStarted","Data":"82336851aac32e6267683428c998fc7cc9e7f5a0247f728da6dbd7629b947a9b"} Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.858707 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-custom-prometheus-ca\") pod \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.858786 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-cert-memcached-mtls\") pod \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.858847 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-combined-ca-bundle\") pod \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.858940 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-config-data\") pod \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.861416 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pdqz\" (UniqueName: \"kubernetes.io/projected/5906ef45-d0ed-40d7-b844-3ca70ed28c91-kube-api-access-2pdqz\") pod \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.861969 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5906ef45-d0ed-40d7-b844-3ca70ed28c91-logs\") pod \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\" (UID: \"5906ef45-d0ed-40d7-b844-3ca70ed28c91\") " Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.862420 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5906ef45-d0ed-40d7-b844-3ca70ed28c91-logs" 
(OuterVolumeSpecName: "logs") pod "5906ef45-d0ed-40d7-b844-3ca70ed28c91" (UID: "5906ef45-d0ed-40d7-b844-3ca70ed28c91"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.863350 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5906ef45-d0ed-40d7-b844-3ca70ed28c91-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.899238 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5906ef45-d0ed-40d7-b844-3ca70ed28c91-kube-api-access-2pdqz" (OuterVolumeSpecName: "kube-api-access-2pdqz") pod "5906ef45-d0ed-40d7-b844-3ca70ed28c91" (UID: "5906ef45-d0ed-40d7-b844-3ca70ed28c91"). InnerVolumeSpecName "kube-api-access-2pdqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.965473 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pdqz\" (UniqueName: \"kubernetes.io/projected/5906ef45-d0ed-40d7-b844-3ca70ed28c91-kube-api-access-2pdqz\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.983329 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5906ef45-d0ed-40d7-b844-3ca70ed28c91" (UID: "5906ef45-d0ed-40d7-b844-3ca70ed28c91"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:22 crc kubenswrapper[4925]: I0121 11:37:22.991591 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "5906ef45-d0ed-40d7-b844-3ca70ed28c91" (UID: "5906ef45-d0ed-40d7-b844-3ca70ed28c91"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.033672 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-config-data" (OuterVolumeSpecName: "config-data") pod "5906ef45-d0ed-40d7-b844-3ca70ed28c91" (UID: "5906ef45-d0ed-40d7-b844-3ca70ed28c91"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.044090 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "5906ef45-d0ed-40d7-b844-3ca70ed28c91" (UID: "5906ef45-d0ed-40d7-b844-3ca70ed28c91"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.067755 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.068149 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.068267 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.068349 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/5906ef45-d0ed-40d7-b844-3ca70ed28c91-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.795495 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerStarted","Data":"e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98"} Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.795682 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.800637 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-q664r"] Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.830252 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-q664r"] Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.843706 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-6761-account-create-update-8lfh8"] Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.858990 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-6761-account-create-update-8lfh8"] Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.871361 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher6761-account-delete-qt5hz"] Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.896489 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher6761-account-delete-qt5hz"] Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.912511 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.919475 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.933310 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-db-create-wn9kc"] Jan 21 11:37:23 crc kubenswrapper[4925]: E0121 11:37:23.933915 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5906ef45-d0ed-40d7-b844-3ca70ed28c91" containerName="watcher-decision-engine" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.933946 4925 
state_mem.go:107] "Deleted CPUSet assignment" podUID="5906ef45-d0ed-40d7-b844-3ca70ed28c91" containerName="watcher-decision-engine" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.934211 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5906ef45-d0ed-40d7-b844-3ca70ed28c91" containerName="watcher-decision-engine" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.935201 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:23 crc kubenswrapper[4925]: I0121 11:37:23.940831 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-wn9kc"] Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.062210 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-update-qp7xf"] Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.063704 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.074286 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-db-secret" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.076237 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c364fa54-d06f-486a-ba50-823ac05c6a41-operator-scripts\") pod \"watcher-db-create-wn9kc\" (UID: \"c364fa54-d06f-486a-ba50-823ac05c6a41\") " pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.076294 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f6f16d9-3059-4cf4-b7b3-14668f16677e-operator-scripts\") pod \"watcher-test-account-create-update-qp7xf\" (UID: \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\") " pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.076447 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9fdq\" (UniqueName: \"kubernetes.io/projected/2f6f16d9-3059-4cf4-b7b3-14668f16677e-kube-api-access-b9fdq\") pod \"watcher-test-account-create-update-qp7xf\" (UID: \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\") " pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.076500 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdzgp\" (UniqueName: \"kubernetes.io/projected/c364fa54-d06f-486a-ba50-823ac05c6a41-kube-api-access-pdzgp\") pod \"watcher-db-create-wn9kc\" (UID: \"c364fa54-d06f-486a-ba50-823ac05c6a41\") " pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.083867 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-update-qp7xf"] Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.329027 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9fdq\" (UniqueName: \"kubernetes.io/projected/2f6f16d9-3059-4cf4-b7b3-14668f16677e-kube-api-access-b9fdq\") pod \"watcher-test-account-create-update-qp7xf\" (UID: 
\"2f6f16d9-3059-4cf4-b7b3-14668f16677e\") " pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.329120 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdzgp\" (UniqueName: \"kubernetes.io/projected/c364fa54-d06f-486a-ba50-823ac05c6a41-kube-api-access-pdzgp\") pod \"watcher-db-create-wn9kc\" (UID: \"c364fa54-d06f-486a-ba50-823ac05c6a41\") " pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.329200 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c364fa54-d06f-486a-ba50-823ac05c6a41-operator-scripts\") pod \"watcher-db-create-wn9kc\" (UID: \"c364fa54-d06f-486a-ba50-823ac05c6a41\") " pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.329235 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f6f16d9-3059-4cf4-b7b3-14668f16677e-operator-scripts\") pod \"watcher-test-account-create-update-qp7xf\" (UID: \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\") " pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.330227 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f6f16d9-3059-4cf4-b7b3-14668f16677e-operator-scripts\") pod \"watcher-test-account-create-update-qp7xf\" (UID: \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\") " pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.330682 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c364fa54-d06f-486a-ba50-823ac05c6a41-operator-scripts\") pod \"watcher-db-create-wn9kc\" (UID: \"c364fa54-d06f-486a-ba50-823ac05c6a41\") " pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.361942 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9fdq\" (UniqueName: \"kubernetes.io/projected/2f6f16d9-3059-4cf4-b7b3-14668f16677e-kube-api-access-b9fdq\") pod \"watcher-test-account-create-update-qp7xf\" (UID: \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\") " pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.362184 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdzgp\" (UniqueName: \"kubernetes.io/projected/c364fa54-d06f-486a-ba50-823ac05c6a41-kube-api-access-pdzgp\") pod \"watcher-db-create-wn9kc\" (UID: \"c364fa54-d06f-486a-ba50-823ac05c6a41\") " pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.392330 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.608767 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:24 crc kubenswrapper[4925]: I0121 11:37:24.820856 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerStarted","Data":"8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b"} Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.051931 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-update-qp7xf"] Jan 21 11:37:25 crc kubenswrapper[4925]: W0121 11:37:25.081605 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f6f16d9_3059_4cf4_b7b3_14668f16677e.slice/crio-f77124447ac7b35a750be7b2838b1c4efc99456488b024171910939a430b6e16 WatchSource:0}: Error finding container f77124447ac7b35a750be7b2838b1c4efc99456488b024171910939a430b6e16: Status 404 returned error can't find the container with id f77124447ac7b35a750be7b2838b1c4efc99456488b024171910939a430b6e16 Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.328882 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-db-create-wn9kc"] Jan 21 11:37:25 crc kubenswrapper[4925]: W0121 11:37:25.331722 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc364fa54_d06f_486a_ba50_823ac05c6a41.slice/crio-6fea288deacf4cf19f121b7b0c81576f0ac5aaef1d7bb15cae5776606cac67f5 WatchSource:0}: Error finding container 6fea288deacf4cf19f121b7b0c81576f0ac5aaef1d7bb15cae5776606cac67f5: Status 404 returned error can't find the container with id 6fea288deacf4cf19f121b7b0c81576f0ac5aaef1d7bb15cae5776606cac67f5 Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.516012 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5906ef45-d0ed-40d7-b844-3ca70ed28c91" path="/var/lib/kubelet/pods/5906ef45-d0ed-40d7-b844-3ca70ed28c91/volumes" Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.516896 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a93b7eef-8870-4f5a-9b2e-509c4953c740" path="/var/lib/kubelet/pods/a93b7eef-8870-4f5a-9b2e-509c4953c740/volumes" Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.517771 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d621d4c5-909d-4fc8-8113-d898b0f87caf" path="/var/lib/kubelet/pods/d621d4c5-909d-4fc8-8113-d898b0f87caf/volumes" Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.519462 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8275c69-592d-4a37-a63b-5797f3d156f4" path="/var/lib/kubelet/pods/e8275c69-592d-4a37-a63b-5797f3d156f4/volumes" Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.833825 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerStarted","Data":"5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61"} Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.836424 4925 generic.go:334] "Generic (PLEG): container finished" podID="c364fa54-d06f-486a-ba50-823ac05c6a41" containerID="e0d0d2f5376d86f549374a5afd726fd09d3be22cd1c1dd2df5b90480e175fd56" exitCode=0 Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.836517 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="watcher-kuttl-default/watcher-db-create-wn9kc" event={"ID":"c364fa54-d06f-486a-ba50-823ac05c6a41","Type":"ContainerDied","Data":"e0d0d2f5376d86f549374a5afd726fd09d3be22cd1c1dd2df5b90480e175fd56"} Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.836564 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-wn9kc" event={"ID":"c364fa54-d06f-486a-ba50-823ac05c6a41","Type":"ContainerStarted","Data":"6fea288deacf4cf19f121b7b0c81576f0ac5aaef1d7bb15cae5776606cac67f5"} Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.838776 4925 generic.go:334] "Generic (PLEG): container finished" podID="2f6f16d9-3059-4cf4-b7b3-14668f16677e" containerID="dab276c7373336a43c392d22aaf42a68b8b7f9e4b4fa6accad91ced7556aa2e3" exitCode=0 Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.838818 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" event={"ID":"2f6f16d9-3059-4cf4-b7b3-14668f16677e","Type":"ContainerDied","Data":"dab276c7373336a43c392d22aaf42a68b8b7f9e4b4fa6accad91ced7556aa2e3"} Jan 21 11:37:25 crc kubenswrapper[4925]: I0121 11:37:25.838840 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" event={"ID":"2f6f16d9-3059-4cf4-b7b3-14668f16677e","Type":"ContainerStarted","Data":"f77124447ac7b35a750be7b2838b1c4efc99456488b024171910939a430b6e16"} Jan 21 11:37:26 crc kubenswrapper[4925]: I0121 11:37:26.851802 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerStarted","Data":"6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34"} Jan 21 11:37:26 crc kubenswrapper[4925]: I0121 11:37:26.884559 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=2.278727307 podStartE2EDuration="6.88452146s" podCreationTimestamp="2026-01-21 11:37:20 +0000 UTC" firstStartedPulling="2026-01-21 11:37:21.682192042 +0000 UTC m=+2533.286083986" lastFinishedPulling="2026-01-21 11:37:26.287986205 +0000 UTC m=+2537.891878139" observedRunningTime="2026-01-21 11:37:26.876206205 +0000 UTC m=+2538.480098149" watchObservedRunningTime="2026-01-21 11:37:26.88452146 +0000 UTC m=+2538.488413394" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.355597 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.361120 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdzgp\" (UniqueName: \"kubernetes.io/projected/c364fa54-d06f-486a-ba50-823ac05c6a41-kube-api-access-pdzgp\") pod \"c364fa54-d06f-486a-ba50-823ac05c6a41\" (UID: \"c364fa54-d06f-486a-ba50-823ac05c6a41\") " Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.361348 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c364fa54-d06f-486a-ba50-823ac05c6a41-operator-scripts\") pod \"c364fa54-d06f-486a-ba50-823ac05c6a41\" (UID: \"c364fa54-d06f-486a-ba50-823ac05c6a41\") " Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.361968 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c364fa54-d06f-486a-ba50-823ac05c6a41-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c364fa54-d06f-486a-ba50-823ac05c6a41" (UID: "c364fa54-d06f-486a-ba50-823ac05c6a41"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.362976 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.367536 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c364fa54-d06f-486a-ba50-823ac05c6a41-kube-api-access-pdzgp" (OuterVolumeSpecName: "kube-api-access-pdzgp") pod "c364fa54-d06f-486a-ba50-823ac05c6a41" (UID: "c364fa54-d06f-486a-ba50-823ac05c6a41"). InnerVolumeSpecName "kube-api-access-pdzgp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.566725 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c364fa54-d06f-486a-ba50-823ac05c6a41-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.567050 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdzgp\" (UniqueName: \"kubernetes.io/projected/c364fa54-d06f-486a-ba50-823ac05c6a41-kube-api-access-pdzgp\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.668077 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9fdq\" (UniqueName: \"kubernetes.io/projected/2f6f16d9-3059-4cf4-b7b3-14668f16677e-kube-api-access-b9fdq\") pod \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\" (UID: \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\") " Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.668822 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f6f16d9-3059-4cf4-b7b3-14668f16677e-operator-scripts\") pod \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\" (UID: \"2f6f16d9-3059-4cf4-b7b3-14668f16677e\") " Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.669618 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f6f16d9-3059-4cf4-b7b3-14668f16677e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2f6f16d9-3059-4cf4-b7b3-14668f16677e" (UID: "2f6f16d9-3059-4cf4-b7b3-14668f16677e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.687649 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f6f16d9-3059-4cf4-b7b3-14668f16677e-kube-api-access-b9fdq" (OuterVolumeSpecName: "kube-api-access-b9fdq") pod "2f6f16d9-3059-4cf4-b7b3-14668f16677e" (UID: "2f6f16d9-3059-4cf4-b7b3-14668f16677e"). InnerVolumeSpecName "kube-api-access-b9fdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.771382 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9fdq\" (UniqueName: \"kubernetes.io/projected/2f6f16d9-3059-4cf4-b7b3-14668f16677e-kube-api-access-b9fdq\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.771455 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f6f16d9-3059-4cf4-b7b3-14668f16677e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.867873 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-db-create-wn9kc" event={"ID":"c364fa54-d06f-486a-ba50-823ac05c6a41","Type":"ContainerDied","Data":"6fea288deacf4cf19f121b7b0c81576f0ac5aaef1d7bb15cae5776606cac67f5"} Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.867921 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-db-create-wn9kc" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.867942 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fea288deacf4cf19f121b7b0c81576f0ac5aaef1d7bb15cae5776606cac67f5" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.876206 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.876352 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-test-account-create-update-qp7xf" event={"ID":"2f6f16d9-3059-4cf4-b7b3-14668f16677e","Type":"ContainerDied","Data":"f77124447ac7b35a750be7b2838b1c4efc99456488b024171910939a430b6e16"} Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.876392 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f77124447ac7b35a750be7b2838b1c4efc99456488b024171910939a430b6e16" Jan 21 11:37:27 crc kubenswrapper[4925]: I0121 11:37:27.876452 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.277396 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f"] Jan 21 11:37:29 crc kubenswrapper[4925]: E0121 11:37:29.278212 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f6f16d9-3059-4cf4-b7b3-14668f16677e" containerName="mariadb-account-create-update" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.278229 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f6f16d9-3059-4cf4-b7b3-14668f16677e" containerName="mariadb-account-create-update" Jan 21 11:37:29 crc kubenswrapper[4925]: E0121 11:37:29.278262 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c364fa54-d06f-486a-ba50-823ac05c6a41" containerName="mariadb-database-create" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.278268 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="c364fa54-d06f-486a-ba50-823ac05c6a41" containerName="mariadb-database-create" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.278444 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f6f16d9-3059-4cf4-b7b3-14668f16677e" containerName="mariadb-account-create-update" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.278465 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="c364fa54-d06f-486a-ba50-823ac05c6a41" containerName="mariadb-database-create" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.279182 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.281766 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.282007 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-999mv" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.298511 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f"] Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.425713 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.425782 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjvsw\" (UniqueName: \"kubernetes.io/projected/15e01463-0f55-4822-8c77-88b6abf58555-kube-api-access-wjvsw\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.425852 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-db-sync-config-data\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.425932 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-config-data\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.527781 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-db-sync-config-data\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.528822 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-config-data\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.528937 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc 
kubenswrapper[4925]: I0121 11:37:29.528964 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjvsw\" (UniqueName: \"kubernetes.io/projected/15e01463-0f55-4822-8c77-88b6abf58555-kube-api-access-wjvsw\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.532825 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-combined-ca-bundle\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.538177 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-db-sync-config-data\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.541570 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-config-data\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.568088 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjvsw\" (UniqueName: \"kubernetes.io/projected/15e01463-0f55-4822-8c77-88b6abf58555-kube-api-access-wjvsw\") pod \"watcher-kuttl-db-sync-vpq6f\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:29 crc kubenswrapper[4925]: I0121 11:37:29.606625 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:30 crc kubenswrapper[4925]: I0121 11:37:30.293903 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f"] Jan 21 11:37:30 crc kubenswrapper[4925]: I0121 11:37:30.922651 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" event={"ID":"15e01463-0f55-4822-8c77-88b6abf58555","Type":"ContainerStarted","Data":"d29f7b67f8ccda56b8e4373ecc0c937b42b9e85289ab486b6cf5d64e69fc8faa"} Jan 21 11:37:31 crc kubenswrapper[4925]: I0121 11:37:31.044873 4925 scope.go:117] "RemoveContainer" containerID="6b81dc42a1cb33f3f18eb36fd756bede6c02ef8b19f6dd37a43055da637c521d" Jan 21 11:37:31 crc kubenswrapper[4925]: I0121 11:37:31.078044 4925 scope.go:117] "RemoveContainer" containerID="34eb2f2c6470b537c3c0d6172f9d00f1fbb89e7eb688abae8f94ccb0d84d1053" Jan 21 11:37:31 crc kubenswrapper[4925]: I0121 11:37:31.110010 4925 scope.go:117] "RemoveContainer" containerID="b2ed6a7944a7c4a09c4e9a3747465c899d29a1f8d6828ac959d2693fb4f85bf9" Jan 21 11:37:31 crc kubenswrapper[4925]: I0121 11:37:31.935807 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" event={"ID":"15e01463-0f55-4822-8c77-88b6abf58555","Type":"ContainerStarted","Data":"e2fd5052c5c331d244523f86308f7ca15f740afe82872aeb5164536ccaa22994"} Jan 21 11:37:31 crc kubenswrapper[4925]: I0121 11:37:31.959054 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" podStartSLOduration=2.959032406 podStartE2EDuration="2.959032406s" podCreationTimestamp="2026-01-21 11:37:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:37:31.957329663 +0000 UTC m=+2543.561221617" watchObservedRunningTime="2026-01-21 11:37:31.959032406 +0000 UTC m=+2543.562924350" Jan 21 11:37:34 crc kubenswrapper[4925]: I0121 11:37:34.978185 4925 generic.go:334] "Generic (PLEG): container finished" podID="15e01463-0f55-4822-8c77-88b6abf58555" containerID="e2fd5052c5c331d244523f86308f7ca15f740afe82872aeb5164536ccaa22994" exitCode=0 Jan 21 11:37:34 crc kubenswrapper[4925]: I0121 11:37:34.978463 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" event={"ID":"15e01463-0f55-4822-8c77-88b6abf58555","Type":"ContainerDied","Data":"e2fd5052c5c331d244523f86308f7ca15f740afe82872aeb5164536ccaa22994"} Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.461739 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.497759 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-combined-ca-bundle\") pod \"15e01463-0f55-4822-8c77-88b6abf58555\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.497840 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-db-sync-config-data\") pod \"15e01463-0f55-4822-8c77-88b6abf58555\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.498012 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-config-data\") pod \"15e01463-0f55-4822-8c77-88b6abf58555\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.498859 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjvsw\" (UniqueName: \"kubernetes.io/projected/15e01463-0f55-4822-8c77-88b6abf58555-kube-api-access-wjvsw\") pod \"15e01463-0f55-4822-8c77-88b6abf58555\" (UID: \"15e01463-0f55-4822-8c77-88b6abf58555\") " Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.517885 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.520818 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "15e01463-0f55-4822-8c77-88b6abf58555" (UID: "15e01463-0f55-4822-8c77-88b6abf58555"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.524253 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15e01463-0f55-4822-8c77-88b6abf58555-kube-api-access-wjvsw" (OuterVolumeSpecName: "kube-api-access-wjvsw") pod "15e01463-0f55-4822-8c77-88b6abf58555" (UID: "15e01463-0f55-4822-8c77-88b6abf58555"). InnerVolumeSpecName "kube-api-access-wjvsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:36 crc kubenswrapper[4925]: E0121 11:37:36.527997 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.544065 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "15e01463-0f55-4822-8c77-88b6abf58555" (UID: "15e01463-0f55-4822-8c77-88b6abf58555"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.566041 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-config-data" (OuterVolumeSpecName: "config-data") pod "15e01463-0f55-4822-8c77-88b6abf58555" (UID: "15e01463-0f55-4822-8c77-88b6abf58555"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.601962 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.602276 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjvsw\" (UniqueName: \"kubernetes.io/projected/15e01463-0f55-4822-8c77-88b6abf58555-kube-api-access-wjvsw\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.602347 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:36 crc kubenswrapper[4925]: I0121 11:37:36.602489 4925 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/15e01463-0f55-4822-8c77-88b6abf58555-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.073551 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" event={"ID":"15e01463-0f55-4822-8c77-88b6abf58555","Type":"ContainerDied","Data":"d29f7b67f8ccda56b8e4373ecc0c937b42b9e85289ab486b6cf5d64e69fc8faa"} Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.073612 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d29f7b67f8ccda56b8e4373ecc0c937b42b9e85289ab486b6cf5d64e69fc8faa" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.073711 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.562567 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:37:37 crc kubenswrapper[4925]: E0121 11:37:37.563105 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e01463-0f55-4822-8c77-88b6abf58555" containerName="watcher-kuttl-db-sync" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.563122 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e01463-0f55-4822-8c77-88b6abf58555" containerName="watcher-kuttl-db-sync" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.563321 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="15e01463-0f55-4822-8c77-88b6abf58555" containerName="watcher-kuttl-db-sync" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.564762 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.573118 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.573909 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-watcher-kuttl-dockercfg-999mv" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.574186 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-api-config-data" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.574672 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.592225 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.602147 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.692865 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.694882 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.706782 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.708609 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-applier-config-data" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.730153 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.751384 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.756612 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.770782 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-decision-engine-config-data" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.773747 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.773800 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.773832 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fldt4\" (UniqueName: \"kubernetes.io/projected/2a70b13c-179d-4bed-a69d-3144d4a91e6f-kube-api-access-fldt4\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.773873 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.773903 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.773945 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8xw4\" (UniqueName: \"kubernetes.io/projected/0d0d0281-6343-43b5-ad81-d89705b152c3-kube-api-access-z8xw4\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.773987 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85hxn\" (UniqueName: \"kubernetes.io/projected/d04bdc1e-390d-4961-b390-11e2c231ac6f-kube-api-access-85hxn\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774038 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774071 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774094 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a70b13c-179d-4bed-a69d-3144d4a91e6f-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774111 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9l4s\" (UniqueName: \"kubernetes.io/projected/69239153-8b79-477f-8b8b-22e84b28872e-kube-api-access-p9l4s\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774131 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0d0281-6343-43b5-ad81-d89705b152c3-logs\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774151 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774173 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774198 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d04bdc1e-390d-4961-b390-11e2c231ac6f-logs\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774221 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 
11:37:37.774252 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774271 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69239153-8b79-477f-8b8b-22e84b28872e-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774293 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774371 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774458 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774503 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.774542 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.876035 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.876113 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " 
pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.876161 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69239153-8b79-477f-8b8b-22e84b28872e-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.876843 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69239153-8b79-477f-8b8b-22e84b28872e-logs\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.876192 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.876973 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.877563 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.877620 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.877657 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.877718 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.877757 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 
11:37:37.877809 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fldt4\" (UniqueName: \"kubernetes.io/projected/2a70b13c-179d-4bed-a69d-3144d4a91e6f-kube-api-access-fldt4\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.877866 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.877890 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.877959 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8xw4\" (UniqueName: \"kubernetes.io/projected/0d0d0281-6343-43b5-ad81-d89705b152c3-kube-api-access-z8xw4\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878008 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85hxn\" (UniqueName: \"kubernetes.io/projected/d04bdc1e-390d-4961-b390-11e2c231ac6f-kube-api-access-85hxn\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878038 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878085 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878122 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a70b13c-179d-4bed-a69d-3144d4a91e6f-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878144 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9l4s\" (UniqueName: \"kubernetes.io/projected/69239153-8b79-477f-8b8b-22e84b28872e-kube-api-access-p9l4s\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" 
Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878178 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0d0281-6343-43b5-ad81-d89705b152c3-logs\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878209 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878252 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878294 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d04bdc1e-390d-4961-b390-11e2c231ac6f-logs\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.878886 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d04bdc1e-390d-4961-b390-11e2c231ac6f-logs\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.879656 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a70b13c-179d-4bed-a69d-3144d4a91e6f-logs\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.880442 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0d0281-6343-43b5-ad81-d89705b152c3-logs\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.883261 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-combined-ca-bundle\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.884363 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-cert-memcached-mtls\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.885346 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" 
(UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-custom-prometheus-ca\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.886443 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-config-data\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.888564 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-config-data\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.889416 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-combined-ca-bundle\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.889499 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-combined-ca-bundle\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.889508 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-config-data\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.889713 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-config-data\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.891094 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-cert-memcached-mtls\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.891241 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-cert-memcached-mtls\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.892471 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: 
\"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-custom-prometheus-ca\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.899800 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-combined-ca-bundle\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.901079 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-cert-memcached-mtls\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.901531 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-custom-prometheus-ca\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.904686 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8xw4\" (UniqueName: \"kubernetes.io/projected/0d0d0281-6343-43b5-ad81-d89705b152c3-kube-api-access-z8xw4\") pod \"watcher-kuttl-api-0\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.904983 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9l4s\" (UniqueName: \"kubernetes.io/projected/69239153-8b79-477f-8b8b-22e84b28872e-kube-api-access-p9l4s\") pod \"watcher-kuttl-applier-0\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.906271 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fldt4\" (UniqueName: \"kubernetes.io/projected/2a70b13c-179d-4bed-a69d-3144d4a91e6f-kube-api-access-fldt4\") pod \"watcher-kuttl-decision-engine-0\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.906966 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85hxn\" (UniqueName: \"kubernetes.io/projected/d04bdc1e-390d-4961-b390-11e2c231ac6f-kube-api-access-85hxn\") pod \"watcher-kuttl-api-1\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:37 crc kubenswrapper[4925]: I0121 11:37:37.914352 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:38 crc kubenswrapper[4925]: I0121 11:37:38.023924 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:38 crc kubenswrapper[4925]: I0121 11:37:38.078039 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:38 crc kubenswrapper[4925]: I0121 11:37:38.218163 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:38 crc kubenswrapper[4925]: I0121 11:37:38.687434 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:37:38 crc kubenswrapper[4925]: W0121 11:37:38.695803 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd04bdc1e_390d_4961_b390_11e2c231ac6f.slice/crio-ac1615633b910ffa0fa083c137fe29228911100d099ba8139f38b6fc2fb94725 WatchSource:0}: Error finding container ac1615633b910ffa0fa083c137fe29228911100d099ba8139f38b6fc2fb94725: Status 404 returned error can't find the container with id ac1615633b910ffa0fa083c137fe29228911100d099ba8139f38b6fc2fb94725 Jan 21 11:37:38 crc kubenswrapper[4925]: I0121 11:37:38.823531 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:37:39 crc kubenswrapper[4925]: I0121 11:37:39.073417 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:37:39 crc kubenswrapper[4925]: W0121 11:37:39.074245 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d0d0281_6343_43b5_ad81_d89705b152c3.slice/crio-c5cac62139e8d35e02edd9389c4508fb390815c6334843c7ba243b17b34b80fc WatchSource:0}: Error finding container c5cac62139e8d35e02edd9389c4508fb390815c6334843c7ba243b17b34b80fc: Status 404 returned error can't find the container with id c5cac62139e8d35e02edd9389c4508fb390815c6334843c7ba243b17b34b80fc Jan 21 11:37:39 crc kubenswrapper[4925]: I0121 11:37:39.105838 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:37:39 crc kubenswrapper[4925]: I0121 11:37:39.277689 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"69239153-8b79-477f-8b8b-22e84b28872e","Type":"ContainerStarted","Data":"f567093fb594ec6b99274461e164c408ea388828ffeedf26d9b54540a826ce8c"} Jan 21 11:37:39 crc kubenswrapper[4925]: I0121 11:37:39.289698 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"0d0d0281-6343-43b5-ad81-d89705b152c3","Type":"ContainerStarted","Data":"c5cac62139e8d35e02edd9389c4508fb390815c6334843c7ba243b17b34b80fc"} Jan 21 11:37:39 crc kubenswrapper[4925]: I0121 11:37:39.296993 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"2a70b13c-179d-4bed-a69d-3144d4a91e6f","Type":"ContainerStarted","Data":"30892169e00d8a307fd74a114b0c6fd8bca556e8baca25b9584e22d217745239"} Jan 21 11:37:39 crc kubenswrapper[4925]: I0121 11:37:39.316948 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"d04bdc1e-390d-4961-b390-11e2c231ac6f","Type":"ContainerStarted","Data":"4068046aaf7384e049003d035c85660c3eb4a53686ca50706418bd92f4655e23"} Jan 21 11:37:39 crc kubenswrapper[4925]: I0121 11:37:39.317558 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" 
event={"ID":"d04bdc1e-390d-4961-b390-11e2c231ac6f","Type":"ContainerStarted","Data":"ac1615633b910ffa0fa083c137fe29228911100d099ba8139f38b6fc2fb94725"} Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.332796 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"0d0d0281-6343-43b5-ad81-d89705b152c3","Type":"ContainerStarted","Data":"becd0b68b11e43453993805013e803c8dc4c727c9b78abbb7411b7d2ed77f83d"} Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.333265 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"0d0d0281-6343-43b5-ad81-d89705b152c3","Type":"ContainerStarted","Data":"ee7d42e131a549068952f99ae2a3e67eac5e5aaefc50ec9999f974cc9fd8db49"} Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.333309 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.342795 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"2a70b13c-179d-4bed-a69d-3144d4a91e6f","Type":"ContainerStarted","Data":"ac43d46300e53edea6ccffa6a9666bd2095fb033d4c3222f87f23d95e241ca79"} Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.348687 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"d04bdc1e-390d-4961-b390-11e2c231ac6f","Type":"ContainerStarted","Data":"14d4d8f75206108171a3dc61b278b2716cbbb682bff74720d220bc4d5c2635cc"} Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.348948 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.350923 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"69239153-8b79-477f-8b8b-22e84b28872e","Type":"ContainerStarted","Data":"8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607"} Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.374888 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-0" podStartSLOduration=3.374862135 podStartE2EDuration="3.374862135s" podCreationTimestamp="2026-01-21 11:37:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:37:40.367759329 +0000 UTC m=+2551.971651263" watchObservedRunningTime="2026-01-21 11:37:40.374862135 +0000 UTC m=+2551.978754069" Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.397254 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podStartSLOduration=3.397226256 podStartE2EDuration="3.397226256s" podCreationTimestamp="2026-01-21 11:37:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:37:40.394768698 +0000 UTC m=+2551.998660642" watchObservedRunningTime="2026-01-21 11:37:40.397226256 +0000 UTC m=+2552.001118190" Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.640384 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-api-1" podStartSLOduration=3.6403604510000003 podStartE2EDuration="3.640360451s" 
podCreationTimestamp="2026-01-21 11:37:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:37:40.500377723 +0000 UTC m=+2552.104269667" watchObservedRunningTime="2026-01-21 11:37:40.640360451 +0000 UTC m=+2552.244252385" Jan 21 11:37:40 crc kubenswrapper[4925]: I0121 11:37:40.666998 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podStartSLOduration=3.6669721969999998 podStartE2EDuration="3.666972197s" podCreationTimestamp="2026-01-21 11:37:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:37:40.651482285 +0000 UTC m=+2552.255374219" watchObservedRunningTime="2026-01-21 11:37:40.666972197 +0000 UTC m=+2552.270864131" Jan 21 11:37:42 crc kubenswrapper[4925]: I0121 11:37:42.912782 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:42 crc kubenswrapper[4925]: I0121 11:37:42.913239 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:37:43 crc kubenswrapper[4925]: I0121 11:37:43.025796 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:43 crc kubenswrapper[4925]: I0121 11:37:43.278899 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:43 crc kubenswrapper[4925]: I0121 11:37:43.279067 4925 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 11:37:44 crc kubenswrapper[4925]: I0121 11:37:44.179179 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:44 crc kubenswrapper[4925]: I0121 11:37:44.816420 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:47 crc kubenswrapper[4925]: I0121 11:37:47.912219 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:47 crc kubenswrapper[4925]: I0121 11:37:47.919847 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.026147 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.053094 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.078931 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.106471 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.219525 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.225010 4925 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.437597 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.443035 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.451750 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.474173 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.483308 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:37:48 crc kubenswrapper[4925]: I0121 11:37:48.502153 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:37:48 crc kubenswrapper[4925]: E0121 11:37:48.502487 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:37:51 crc kubenswrapper[4925]: I0121 11:37:51.028287 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:51 crc kubenswrapper[4925]: I0121 11:37:51.843725 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:51 crc kubenswrapper[4925]: I0121 11:37:51.844701 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="ceilometer-central-agent" containerID="cri-o://e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98" gracePeriod=30 Jan 21 11:37:51 crc kubenswrapper[4925]: I0121 11:37:51.844757 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="sg-core" containerID="cri-o://5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61" gracePeriod=30 Jan 21 11:37:51 crc kubenswrapper[4925]: I0121 11:37:51.844864 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="ceilometer-notification-agent" containerID="cri-o://8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b" gracePeriod=30 Jan 21 11:37:51 crc kubenswrapper[4925]: I0121 11:37:51.844897 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="proxy-httpd" containerID="cri-o://6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34" gracePeriod=30 Jan 21 11:37:52 crc kubenswrapper[4925]: I0121 11:37:52.534814 4925 
generic.go:334] "Generic (PLEG): container finished" podID="b5473307-4311-4aaf-858e-74f892ec789b" containerID="6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34" exitCode=0 Jan 21 11:37:52 crc kubenswrapper[4925]: I0121 11:37:52.534865 4925 generic.go:334] "Generic (PLEG): container finished" podID="b5473307-4311-4aaf-858e-74f892ec789b" containerID="5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61" exitCode=2 Jan 21 11:37:52 crc kubenswrapper[4925]: I0121 11:37:52.534874 4925 generic.go:334] "Generic (PLEG): container finished" podID="b5473307-4311-4aaf-858e-74f892ec789b" containerID="e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98" exitCode=0 Jan 21 11:37:52 crc kubenswrapper[4925]: I0121 11:37:52.534891 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerDied","Data":"6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34"} Jan 21 11:37:52 crc kubenswrapper[4925]: I0121 11:37:52.534951 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerDied","Data":"5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61"} Jan 21 11:37:52 crc kubenswrapper[4925]: I0121 11:37:52.534962 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerDied","Data":"e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98"} Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.317016 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.443650 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-sg-core-conf-yaml\") pod \"b5473307-4311-4aaf-858e-74f892ec789b\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.444145 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-ceilometer-tls-certs\") pod \"b5473307-4311-4aaf-858e-74f892ec789b\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.444223 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-combined-ca-bundle\") pod \"b5473307-4311-4aaf-858e-74f892ec789b\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.444264 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-run-httpd\") pod \"b5473307-4311-4aaf-858e-74f892ec789b\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.444291 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-config-data\") pod \"b5473307-4311-4aaf-858e-74f892ec789b\" (UID: 
\"b5473307-4311-4aaf-858e-74f892ec789b\") " Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.444335 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-scripts\") pod \"b5473307-4311-4aaf-858e-74f892ec789b\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.444438 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-log-httpd\") pod \"b5473307-4311-4aaf-858e-74f892ec789b\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.444518 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bxqf\" (UniqueName: \"kubernetes.io/projected/b5473307-4311-4aaf-858e-74f892ec789b-kube-api-access-5bxqf\") pod \"b5473307-4311-4aaf-858e-74f892ec789b\" (UID: \"b5473307-4311-4aaf-858e-74f892ec789b\") " Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.447341 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b5473307-4311-4aaf-858e-74f892ec789b" (UID: "b5473307-4311-4aaf-858e-74f892ec789b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.447530 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b5473307-4311-4aaf-858e-74f892ec789b" (UID: "b5473307-4311-4aaf-858e-74f892ec789b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.451627 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5473307-4311-4aaf-858e-74f892ec789b-kube-api-access-5bxqf" (OuterVolumeSpecName: "kube-api-access-5bxqf") pod "b5473307-4311-4aaf-858e-74f892ec789b" (UID: "b5473307-4311-4aaf-858e-74f892ec789b"). InnerVolumeSpecName "kube-api-access-5bxqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.452091 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-scripts" (OuterVolumeSpecName: "scripts") pod "b5473307-4311-4aaf-858e-74f892ec789b" (UID: "b5473307-4311-4aaf-858e-74f892ec789b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.471881 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b5473307-4311-4aaf-858e-74f892ec789b" (UID: "b5473307-4311-4aaf-858e-74f892ec789b"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.501040 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "b5473307-4311-4aaf-858e-74f892ec789b" (UID: "b5473307-4311-4aaf-858e-74f892ec789b"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.533087 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5473307-4311-4aaf-858e-74f892ec789b" (UID: "b5473307-4311-4aaf-858e-74f892ec789b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.549954 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bxqf\" (UniqueName: \"kubernetes.io/projected/b5473307-4311-4aaf-858e-74f892ec789b-kube-api-access-5bxqf\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.549981 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.549991 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.550001 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.550010 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.550018 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.550026 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b5473307-4311-4aaf-858e-74f892ec789b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.554903 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-config-data" (OuterVolumeSpecName: "config-data") pod "b5473307-4311-4aaf-858e-74f892ec789b" (UID: "b5473307-4311-4aaf-858e-74f892ec789b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.570748 4925 generic.go:334] "Generic (PLEG): container finished" podID="b5473307-4311-4aaf-858e-74f892ec789b" containerID="8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b" exitCode=0 Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.570797 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerDied","Data":"8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b"} Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.570826 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"b5473307-4311-4aaf-858e-74f892ec789b","Type":"ContainerDied","Data":"82336851aac32e6267683428c998fc7cc9e7f5a0247f728da6dbd7629b947a9b"} Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.570845 4925 scope.go:117] "RemoveContainer" containerID="6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.570997 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.610040 4925 scope.go:117] "RemoveContainer" containerID="5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.625883 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.635117 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.651813 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5473307-4311-4aaf-858e-74f892ec789b-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.658788 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:55 crc kubenswrapper[4925]: E0121 11:37:55.659372 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="ceilometer-notification-agent" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.659444 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="ceilometer-notification-agent" Jan 21 11:37:55 crc kubenswrapper[4925]: E0121 11:37:55.659480 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="ceilometer-central-agent" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.659490 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="ceilometer-central-agent" Jan 21 11:37:55 crc kubenswrapper[4925]: E0121 11:37:55.659511 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="proxy-httpd" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.659519 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="proxy-httpd" Jan 21 11:37:55 crc kubenswrapper[4925]: E0121 11:37:55.659538 4925 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="sg-core" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.659546 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="sg-core" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.659832 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="ceilometer-central-agent" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.659851 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="ceilometer-notification-agent" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.659865 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="sg-core" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.659879 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5473307-4311-4aaf-858e-74f892ec789b" containerName="proxy-httpd" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.662417 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.664951 4925 scope.go:117] "RemoveContainer" containerID="8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.665382 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.665465 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.665721 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.679155 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.719238 4925 scope.go:117] "RemoveContainer" containerID="e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.754026 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.754122 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-run-httpd\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.754163 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.754183 4925 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-log-httpd\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.754221 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-scripts\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.754297 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-config-data\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.754323 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8l7x\" (UniqueName: \"kubernetes.io/projected/51a8baf6-0755-458f-8c18-0e9bf4892ccd-kube-api-access-q8l7x\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.754375 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.760610 4925 scope.go:117] "RemoveContainer" containerID="6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34" Jan 21 11:37:55 crc kubenswrapper[4925]: E0121 11:37:55.761274 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34\": container with ID starting with 6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34 not found: ID does not exist" containerID="6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.761369 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34"} err="failed to get container status \"6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34\": rpc error: code = NotFound desc = could not find container \"6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34\": container with ID starting with 6d9a4d18768d6321550c5eeaba7e28fbd19498af564b7ec50197fa5e1a5a2f34 not found: ID does not exist" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.761445 4925 scope.go:117] "RemoveContainer" containerID="5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61" Jan 21 11:37:55 crc kubenswrapper[4925]: E0121 11:37:55.761847 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61\": container with ID 
starting with 5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61 not found: ID does not exist" containerID="5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.761906 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61"} err="failed to get container status \"5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61\": rpc error: code = NotFound desc = could not find container \"5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61\": container with ID starting with 5e25fa77ea78bfa797349f27d68196ab618769e5c06e7c5175395e27229e1b61 not found: ID does not exist" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.761941 4925 scope.go:117] "RemoveContainer" containerID="8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b" Jan 21 11:37:55 crc kubenswrapper[4925]: E0121 11:37:55.762298 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b\": container with ID starting with 8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b not found: ID does not exist" containerID="8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.762372 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b"} err="failed to get container status \"8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b\": rpc error: code = NotFound desc = could not find container \"8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b\": container with ID starting with 8296d7ce17f4a3d3c44b2d1531141bfe4424571beea793b9b9c9477ba7ce8a1b not found: ID does not exist" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.762444 4925 scope.go:117] "RemoveContainer" containerID="e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98" Jan 21 11:37:55 crc kubenswrapper[4925]: E0121 11:37:55.762740 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98\": container with ID starting with e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98 not found: ID does not exist" containerID="e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.762765 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98"} err="failed to get container status \"e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98\": rpc error: code = NotFound desc = could not find container \"e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98\": container with ID starting with e96fa8b02028da744999888186c8364dceb68ceb7bb36dae79a6bd780e113a98 not found: ID does not exist" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.856475 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.856573 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-run-httpd\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.856597 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.856620 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-log-httpd\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.856859 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-scripts\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.856934 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-config-data\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.856955 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8l7x\" (UniqueName: \"kubernetes.io/projected/51a8baf6-0755-458f-8c18-0e9bf4892ccd-kube-api-access-q8l7x\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.856992 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.857621 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-log-httpd\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.858052 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-run-httpd\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.862443 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.863194 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.863225 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.864503 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-scripts\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.866268 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-config-data\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:55 crc kubenswrapper[4925]: I0121 11:37:55.888103 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8l7x\" (UniqueName: \"kubernetes.io/projected/51a8baf6-0755-458f-8c18-0e9bf4892ccd-kube-api-access-q8l7x\") pod \"ceilometer-0\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:56 crc kubenswrapper[4925]: I0121 11:37:56.022695 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:37:56 crc kubenswrapper[4925]: I0121 11:37:56.565191 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:37:56 crc kubenswrapper[4925]: I0121 11:37:56.587633 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerStarted","Data":"acadb841afecc880168e486fc3d15472d10400b290254e494950c7184d682fc5"} Jan 21 11:37:57 crc kubenswrapper[4925]: I0121 11:37:57.514741 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5473307-4311-4aaf-858e-74f892ec789b" path="/var/lib/kubelet/pods/b5473307-4311-4aaf-858e-74f892ec789b/volumes" Jan 21 11:37:57 crc kubenswrapper[4925]: I0121 11:37:57.603701 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerStarted","Data":"0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02"} Jan 21 11:37:58 crc kubenswrapper[4925]: I0121 11:37:58.616605 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerStarted","Data":"88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2"} Jan 21 11:37:59 crc kubenswrapper[4925]: I0121 11:37:59.514206 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:37:59 crc kubenswrapper[4925]: E0121 11:37:59.515011 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:37:59 crc kubenswrapper[4925]: I0121 11:37:59.629902 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerStarted","Data":"306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634"} Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.139481 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh"] Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.141378 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.147900 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-config-data" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.149525 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"watcher-kuttl-scripts" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.153788 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh"] Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.238734 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-config-data\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.238854 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmhrq\" (UniqueName: \"kubernetes.io/projected/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-kube-api-access-vmhrq\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.238927 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-scripts-volume\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.239139 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-combined-ca-bundle\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.340941 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-scripts-volume\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.341028 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-combined-ca-bundle\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.341151 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-config-data\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: 
\"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.341218 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmhrq\" (UniqueName: \"kubernetes.io/projected/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-kube-api-access-vmhrq\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.346487 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-config-data\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.350234 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-combined-ca-bundle\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.350747 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-scripts-volume\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.364926 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmhrq\" (UniqueName: \"kubernetes.io/projected/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-kube-api-access-vmhrq\") pod \"watcher-kuttl-db-purge-29483258-nvwbh\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.462115 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.648953 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerStarted","Data":"26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62"} Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.649827 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.695645 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=1.999392522 podStartE2EDuration="5.695619511s" podCreationTimestamp="2026-01-21 11:37:55 +0000 UTC" firstStartedPulling="2026-01-21 11:37:56.565202805 +0000 UTC m=+2568.169094739" lastFinishedPulling="2026-01-21 11:38:00.261429794 +0000 UTC m=+2571.865321728" observedRunningTime="2026-01-21 11:38:00.688581518 +0000 UTC m=+2572.292473462" watchObservedRunningTime="2026-01-21 11:38:00.695619511 +0000 UTC m=+2572.299511445" Jan 21 11:38:00 crc kubenswrapper[4925]: I0121 11:38:00.776933 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh"] Jan 21 11:38:00 crc kubenswrapper[4925]: W0121 11:38:00.789069 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d1d5ff3_17ad_4c0f_bd87_47fda0308016.slice/crio-c30bf0f2583b14facfdd4a7a5f70ac498335c2e79527769f086ce3e07496a599 WatchSource:0}: Error finding container c30bf0f2583b14facfdd4a7a5f70ac498335c2e79527769f086ce3e07496a599: Status 404 returned error can't find the container with id c30bf0f2583b14facfdd4a7a5f70ac498335c2e79527769f086ce3e07496a599 Jan 21 11:38:01 crc kubenswrapper[4925]: I0121 11:38:01.662610 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" event={"ID":"2d1d5ff3-17ad-4c0f-bd87-47fda0308016","Type":"ContainerStarted","Data":"648418b220e1e235e2049d99914bc98ac17d884fb07a3e47ad72eb36b6fec672"} Jan 21 11:38:01 crc kubenswrapper[4925]: I0121 11:38:01.663059 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" event={"ID":"2d1d5ff3-17ad-4c0f-bd87-47fda0308016","Type":"ContainerStarted","Data":"c30bf0f2583b14facfdd4a7a5f70ac498335c2e79527769f086ce3e07496a599"} Jan 21 11:38:01 crc kubenswrapper[4925]: I0121 11:38:01.720208 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" podStartSLOduration=1.720149976 podStartE2EDuration="1.720149976s" podCreationTimestamp="2026-01-21 11:38:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:38:01.718200305 +0000 UTC m=+2573.322092239" watchObservedRunningTime="2026-01-21 11:38:01.720149976 +0000 UTC m=+2573.324041920" Jan 21 11:38:03 crc kubenswrapper[4925]: I0121 11:38:03.684167 4925 generic.go:334] "Generic (PLEG): container finished" podID="2d1d5ff3-17ad-4c0f-bd87-47fda0308016" containerID="648418b220e1e235e2049d99914bc98ac17d884fb07a3e47ad72eb36b6fec672" exitCode=0 Jan 21 11:38:03 crc kubenswrapper[4925]: I0121 11:38:03.684340 4925 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" event={"ID":"2d1d5ff3-17ad-4c0f-bd87-47fda0308016","Type":"ContainerDied","Data":"648418b220e1e235e2049d99914bc98ac17d884fb07a3e47ad72eb36b6fec672"} Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.066057 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.165284 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-combined-ca-bundle\") pod \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.165567 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmhrq\" (UniqueName: \"kubernetes.io/projected/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-kube-api-access-vmhrq\") pod \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.165612 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-scripts-volume\") pod \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.165637 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-config-data\") pod \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\" (UID: \"2d1d5ff3-17ad-4c0f-bd87-47fda0308016\") " Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.172241 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-scripts-volume" (OuterVolumeSpecName: "scripts-volume") pod "2d1d5ff3-17ad-4c0f-bd87-47fda0308016" (UID: "2d1d5ff3-17ad-4c0f-bd87-47fda0308016"). InnerVolumeSpecName "scripts-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.244242 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-kube-api-access-vmhrq" (OuterVolumeSpecName: "kube-api-access-vmhrq") pod "2d1d5ff3-17ad-4c0f-bd87-47fda0308016" (UID: "2d1d5ff3-17ad-4c0f-bd87-47fda0308016"). InnerVolumeSpecName "kube-api-access-vmhrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.258828 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d1d5ff3-17ad-4c0f-bd87-47fda0308016" (UID: "2d1d5ff3-17ad-4c0f-bd87-47fda0308016"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.269058 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmhrq\" (UniqueName: \"kubernetes.io/projected/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-kube-api-access-vmhrq\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.269122 4925 reconciler_common.go:293] "Volume detached for volume \"scripts-volume\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-scripts-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.269137 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.305845 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-config-data" (OuterVolumeSpecName: "config-data") pod "2d1d5ff3-17ad-4c0f-bd87-47fda0308016" (UID: "2d1d5ff3-17ad-4c0f-bd87-47fda0308016"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.370946 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d1d5ff3-17ad-4c0f-bd87-47fda0308016-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.707977 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" event={"ID":"2d1d5ff3-17ad-4c0f-bd87-47fda0308016","Type":"ContainerDied","Data":"c30bf0f2583b14facfdd4a7a5f70ac498335c2e79527769f086ce3e07496a599"} Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.708043 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c30bf0f2583b14facfdd4a7a5f70ac498335c2e79527769f086ce3e07496a599" Jan 21 11:38:05 crc kubenswrapper[4925]: I0121 11:38:05.708023 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh" Jan 21 11:38:10 crc kubenswrapper[4925]: I0121 11:38:10.998991 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.008134 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-sync-vpq6f"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.022199 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.031170 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-db-purge-29483258-nvwbh"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.104283 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-lbzrk"] Jan 21 11:38:11 crc kubenswrapper[4925]: E0121 11:38:11.104742 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d1d5ff3-17ad-4c0f-bd87-47fda0308016" containerName="watcher-db-manage" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.104761 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d1d5ff3-17ad-4c0f-bd87-47fda0308016" containerName="watcher-db-manage" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.104936 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d1d5ff3-17ad-4c0f-bd87-47fda0308016" containerName="watcher-db-manage" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.109809 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.146489 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-lbzrk"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.167782 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.168429 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" podUID="2a70b13c-179d-4bed-a69d-3144d4a91e6f" containerName="watcher-decision-engine" containerID="cri-o://ac43d46300e53edea6ccffa6a9666bd2095fb033d4c3222f87f23d95e241ca79" gracePeriod=30 Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.210514 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7778e713-1317-48da-a126-8110565993cf-operator-scripts\") pod \"watchertest-account-delete-lbzrk\" (UID: \"7778e713-1317-48da-a126-8110565993cf\") " pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.210687 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-645w6\" (UniqueName: \"kubernetes.io/projected/7778e713-1317-48da-a126-8110565993cf-kube-api-access-645w6\") pod \"watchertest-account-delete-lbzrk\" (UID: \"7778e713-1317-48da-a126-8110565993cf\") " pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.259185 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.259486 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="69239153-8b79-477f-8b8b-22e84b28872e" containerName="watcher-applier" containerID="cri-o://8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607" gracePeriod=30 Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.315459 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-645w6\" (UniqueName: \"kubernetes.io/projected/7778e713-1317-48da-a126-8110565993cf-kube-api-access-645w6\") pod \"watchertest-account-delete-lbzrk\" (UID: \"7778e713-1317-48da-a126-8110565993cf\") " pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.315750 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7778e713-1317-48da-a126-8110565993cf-operator-scripts\") pod \"watchertest-account-delete-lbzrk\" (UID: \"7778e713-1317-48da-a126-8110565993cf\") " pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.321354 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7778e713-1317-48da-a126-8110565993cf-operator-scripts\") pod \"watchertest-account-delete-lbzrk\" (UID: \"7778e713-1317-48da-a126-8110565993cf\") " pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.349648 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.350034 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-kuttl-api-log" containerID="cri-o://ee7d42e131a549068952f99ae2a3e67eac5e5aaefc50ec9999f974cc9fd8db49" gracePeriod=30 Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.350596 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-api" containerID="cri-o://becd0b68b11e43453993805013e803c8dc4c727c9b78abbb7411b7d2ed77f83d" gracePeriod=30 Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.397475 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.397768 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-kuttl-api-log" containerID="cri-o://4068046aaf7384e049003d035c85660c3eb4a53686ca50706418bd92f4655e23" gracePeriod=30 Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.398283 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-api" containerID="cri-o://14d4d8f75206108171a3dc61b278b2716cbbb682bff74720d220bc4d5c2635cc" gracePeriod=30 Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.414901 4925 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-645w6\" (UniqueName: \"kubernetes.io/projected/7778e713-1317-48da-a126-8110565993cf-kube-api-access-645w6\") pod \"watchertest-account-delete-lbzrk\" (UID: \"7778e713-1317-48da-a126-8110565993cf\") " pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.492045 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.559973 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15e01463-0f55-4822-8c77-88b6abf58555" path="/var/lib/kubelet/pods/15e01463-0f55-4822-8c77-88b6abf58555/volumes" Jan 21 11:38:11 crc kubenswrapper[4925]: I0121 11:38:11.560730 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d1d5ff3-17ad-4c0f-bd87-47fda0308016" path="/var/lib/kubelet/pods/2d1d5ff3-17ad-4c0f-bd87-47fda0308016/volumes" Jan 21 11:38:12 crc kubenswrapper[4925]: I0121 11:38:12.404673 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-lbzrk"] Jan 21 11:38:12 crc kubenswrapper[4925]: W0121 11:38:12.410313 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7778e713_1317_48da_a126_8110565993cf.slice/crio-42972bcaa13a182ad99874790294ee3edd03c9b2b7169df64fe1d829d85d859d WatchSource:0}: Error finding container 42972bcaa13a182ad99874790294ee3edd03c9b2b7169df64fe1d829d85d859d: Status 404 returned error can't find the container with id 42972bcaa13a182ad99874790294ee3edd03c9b2b7169df64fe1d829d85d859d Jan 21 11:38:12 crc kubenswrapper[4925]: I0121 11:38:12.807740 4925 generic.go:334] "Generic (PLEG): container finished" podID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerID="ee7d42e131a549068952f99ae2a3e67eac5e5aaefc50ec9999f974cc9fd8db49" exitCode=143 Jan 21 11:38:12 crc kubenswrapper[4925]: I0121 11:38:12.807842 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"0d0d0281-6343-43b5-ad81-d89705b152c3","Type":"ContainerDied","Data":"ee7d42e131a549068952f99ae2a3e67eac5e5aaefc50ec9999f974cc9fd8db49"} Jan 21 11:38:12 crc kubenswrapper[4925]: I0121 11:38:12.811919 4925 generic.go:334] "Generic (PLEG): container finished" podID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerID="4068046aaf7384e049003d035c85660c3eb4a53686ca50706418bd92f4655e23" exitCode=143 Jan 21 11:38:12 crc kubenswrapper[4925]: I0121 11:38:12.812005 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"d04bdc1e-390d-4961-b390-11e2c231ac6f","Type":"ContainerDied","Data":"4068046aaf7384e049003d035c85660c3eb4a53686ca50706418bd92f4655e23"} Jan 21 11:38:12 crc kubenswrapper[4925]: I0121 11:38:12.815070 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" event={"ID":"7778e713-1317-48da-a126-8110565993cf","Type":"ContainerStarted","Data":"e77362fc75425c5d7bd8f945869d7962a047b58e56f348e94279ef1e78a5fde0"} Jan 21 11:38:12 crc kubenswrapper[4925]: I0121 11:38:12.815124 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" 
event={"ID":"7778e713-1317-48da-a126-8110565993cf","Type":"ContainerStarted","Data":"42972bcaa13a182ad99874790294ee3edd03c9b2b7169df64fe1d829d85d859d"} Jan 21 11:38:12 crc kubenswrapper[4925]: I0121 11:38:12.841527 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" podStartSLOduration=1.8415015860000001 podStartE2EDuration="1.841501586s" podCreationTimestamp="2026-01-21 11:38:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 11:38:12.834668118 +0000 UTC m=+2584.438560052" watchObservedRunningTime="2026-01-21 11:38:12.841501586 +0000 UTC m=+2584.445393520" Jan 21 11:38:13 crc kubenswrapper[4925]: E0121 11:38:13.028373 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:38:13 crc kubenswrapper[4925]: E0121 11:38:13.031645 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:38:13 crc kubenswrapper[4925]: E0121 11:38:13.033707 4925 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 21 11:38:13 crc kubenswrapper[4925]: E0121 11:38:13.033807 4925 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-applier-0" podUID="69239153-8b79-477f-8b8b-22e84b28872e" containerName="watcher-applier" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.503252 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:38:13 crc kubenswrapper[4925]: E0121 11:38:13.503555 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.633635 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.660349 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.228:9322/\": read tcp 10.217.0.2:50860->10.217.0.228:9322: read: connection reset by peer" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.660370 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-0" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.228:9322/\": read tcp 10.217.0.2:50870->10.217.0.228:9322: read: connection reset by peer" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.688410 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-kuttl-api-log" probeResult="failure" output="Get \"http://10.217.0.229:9322/\": read tcp 10.217.0.2:41254->10.217.0.229:9322: read: connection reset by peer" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.688595 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/watcher-kuttl-api-1" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.229:9322/\": read tcp 10.217.0.2:41270->10.217.0.229:9322: read: connection reset by peer" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.770288 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-config-data\") pod \"69239153-8b79-477f-8b8b-22e84b28872e\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.770429 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-cert-memcached-mtls\") pod \"69239153-8b79-477f-8b8b-22e84b28872e\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.770634 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9l4s\" (UniqueName: \"kubernetes.io/projected/69239153-8b79-477f-8b8b-22e84b28872e-kube-api-access-p9l4s\") pod \"69239153-8b79-477f-8b8b-22e84b28872e\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.770758 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-combined-ca-bundle\") pod \"69239153-8b79-477f-8b8b-22e84b28872e\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.770878 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69239153-8b79-477f-8b8b-22e84b28872e-logs\") pod \"69239153-8b79-477f-8b8b-22e84b28872e\" (UID: \"69239153-8b79-477f-8b8b-22e84b28872e\") " Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.772268 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/69239153-8b79-477f-8b8b-22e84b28872e-logs" (OuterVolumeSpecName: "logs") pod "69239153-8b79-477f-8b8b-22e84b28872e" (UID: "69239153-8b79-477f-8b8b-22e84b28872e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.849831 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69239153-8b79-477f-8b8b-22e84b28872e-kube-api-access-p9l4s" (OuterVolumeSpecName: "kube-api-access-p9l4s") pod "69239153-8b79-477f-8b8b-22e84b28872e" (UID: "69239153-8b79-477f-8b8b-22e84b28872e"). InnerVolumeSpecName "kube-api-access-p9l4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.852343 4925 generic.go:334] "Generic (PLEG): container finished" podID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerID="14d4d8f75206108171a3dc61b278b2716cbbb682bff74720d220bc4d5c2635cc" exitCode=0 Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.852585 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"d04bdc1e-390d-4961-b390-11e2c231ac6f","Type":"ContainerDied","Data":"14d4d8f75206108171a3dc61b278b2716cbbb682bff74720d220bc4d5c2635cc"} Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.856781 4925 generic.go:334] "Generic (PLEG): container finished" podID="69239153-8b79-477f-8b8b-22e84b28872e" containerID="8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607" exitCode=0 Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.856996 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"69239153-8b79-477f-8b8b-22e84b28872e","Type":"ContainerDied","Data":"8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607"} Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.857093 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-applier-0" event={"ID":"69239153-8b79-477f-8b8b-22e84b28872e","Type":"ContainerDied","Data":"f567093fb594ec6b99274461e164c408ea388828ffeedf26d9b54540a826ce8c"} Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.857177 4925 scope.go:117] "RemoveContainer" containerID="8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.857475 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-applier-0" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.863818 4925 generic.go:334] "Generic (PLEG): container finished" podID="7778e713-1317-48da-a126-8110565993cf" containerID="e77362fc75425c5d7bd8f945869d7962a047b58e56f348e94279ef1e78a5fde0" exitCode=0 Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.863984 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" event={"ID":"7778e713-1317-48da-a126-8110565993cf","Type":"ContainerDied","Data":"e77362fc75425c5d7bd8f945869d7962a047b58e56f348e94279ef1e78a5fde0"} Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.875519 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"0d0d0281-6343-43b5-ad81-d89705b152c3","Type":"ContainerDied","Data":"becd0b68b11e43453993805013e803c8dc4c727c9b78abbb7411b7d2ed77f83d"} Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.876107 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9l4s\" (UniqueName: \"kubernetes.io/projected/69239153-8b79-477f-8b8b-22e84b28872e-kube-api-access-p9l4s\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.876160 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69239153-8b79-477f-8b8b-22e84b28872e-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.882427 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69239153-8b79-477f-8b8b-22e84b28872e" (UID: "69239153-8b79-477f-8b8b-22e84b28872e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.871390 4925 generic.go:334] "Generic (PLEG): container finished" podID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerID="becd0b68b11e43453993805013e803c8dc4c727c9b78abbb7411b7d2ed77f83d" exitCode=0 Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.928182 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-config-data" (OuterVolumeSpecName: "config-data") pod "69239153-8b79-477f-8b8b-22e84b28872e" (UID: "69239153-8b79-477f-8b8b-22e84b28872e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.980304 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.980344 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.986206 4925 scope.go:117] "RemoveContainer" containerID="8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607" Jan 21 11:38:13 crc kubenswrapper[4925]: E0121 11:38:13.987250 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607\": container with ID starting with 8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607 not found: ID does not exist" containerID="8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.987312 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607"} err="failed to get container status \"8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607\": rpc error: code = NotFound desc = could not find container \"8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607\": container with ID starting with 8f79432a05385231af02ad1839eedbda8b68826a3d1e458c690287eba9bc8607 not found: ID does not exist" Jan 21 11:38:13 crc kubenswrapper[4925]: I0121 11:38:13.990651 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "69239153-8b79-477f-8b8b-22e84b28872e" (UID: "69239153-8b79-477f-8b8b-22e84b28872e"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.082580 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/69239153-8b79-477f-8b8b-22e84b28872e-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.089535 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.183343 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-combined-ca-bundle\") pod \"0d0d0281-6343-43b5-ad81-d89705b152c3\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.183423 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0d0281-6343-43b5-ad81-d89705b152c3-logs\") pod \"0d0d0281-6343-43b5-ad81-d89705b152c3\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.183470 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8xw4\" (UniqueName: \"kubernetes.io/projected/0d0d0281-6343-43b5-ad81-d89705b152c3-kube-api-access-z8xw4\") pod \"0d0d0281-6343-43b5-ad81-d89705b152c3\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.183513 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-config-data\") pod \"0d0d0281-6343-43b5-ad81-d89705b152c3\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.183598 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-custom-prometheus-ca\") pod \"0d0d0281-6343-43b5-ad81-d89705b152c3\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.184499 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-cert-memcached-mtls\") pod \"0d0d0281-6343-43b5-ad81-d89705b152c3\" (UID: \"0d0d0281-6343-43b5-ad81-d89705b152c3\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.186250 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d0d0281-6343-43b5-ad81-d89705b152c3-logs" (OuterVolumeSpecName: "logs") pod "0d0d0281-6343-43b5-ad81-d89705b152c3" (UID: "0d0d0281-6343-43b5-ad81-d89705b152c3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.193389 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d0d0281-6343-43b5-ad81-d89705b152c3-kube-api-access-z8xw4" (OuterVolumeSpecName: "kube-api-access-z8xw4") pod "0d0d0281-6343-43b5-ad81-d89705b152c3" (UID: "0d0d0281-6343-43b5-ad81-d89705b152c3"). InnerVolumeSpecName "kube-api-access-z8xw4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.228522 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.228839 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "0d0d0281-6343-43b5-ad81-d89705b152c3" (UID: "0d0d0281-6343-43b5-ad81-d89705b152c3"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.231725 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d0d0281-6343-43b5-ad81-d89705b152c3" (UID: "0d0d0281-6343-43b5-ad81-d89705b152c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.236098 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-applier-0"] Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.267097 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-config-data" (OuterVolumeSpecName: "config-data") pod "0d0d0281-6343-43b5-ad81-d89705b152c3" (UID: "0d0d0281-6343-43b5-ad81-d89705b152c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.281876 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "0d0d0281-6343-43b5-ad81-d89705b152c3" (UID: "0d0d0281-6343-43b5-ad81-d89705b152c3"). InnerVolumeSpecName "cert-memcached-mtls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.287978 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8xw4\" (UniqueName: \"kubernetes.io/projected/0d0d0281-6343-43b5-ad81-d89705b152c3-kube-api-access-z8xw4\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.288043 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.288058 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.288067 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.288077 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0d0281-6343-43b5-ad81-d89705b152c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.288087 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0d0281-6343-43b5-ad81-d89705b152c3-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.340286 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.495911 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-combined-ca-bundle\") pod \"d04bdc1e-390d-4961-b390-11e2c231ac6f\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.496027 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-custom-prometheus-ca\") pod \"d04bdc1e-390d-4961-b390-11e2c231ac6f\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.496228 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85hxn\" (UniqueName: \"kubernetes.io/projected/d04bdc1e-390d-4961-b390-11e2c231ac6f-kube-api-access-85hxn\") pod \"d04bdc1e-390d-4961-b390-11e2c231ac6f\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.496261 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-config-data\") pod \"d04bdc1e-390d-4961-b390-11e2c231ac6f\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.496294 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d04bdc1e-390d-4961-b390-11e2c231ac6f-logs\") pod \"d04bdc1e-390d-4961-b390-11e2c231ac6f\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.496319 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-cert-memcached-mtls\") pod \"d04bdc1e-390d-4961-b390-11e2c231ac6f\" (UID: \"d04bdc1e-390d-4961-b390-11e2c231ac6f\") " Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.497023 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d04bdc1e-390d-4961-b390-11e2c231ac6f-logs" (OuterVolumeSpecName: "logs") pod "d04bdc1e-390d-4961-b390-11e2c231ac6f" (UID: "d04bdc1e-390d-4961-b390-11e2c231ac6f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.502467 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d04bdc1e-390d-4961-b390-11e2c231ac6f-kube-api-access-85hxn" (OuterVolumeSpecName: "kube-api-access-85hxn") pod "d04bdc1e-390d-4961-b390-11e2c231ac6f" (UID: "d04bdc1e-390d-4961-b390-11e2c231ac6f"). InnerVolumeSpecName "kube-api-access-85hxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.526253 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d04bdc1e-390d-4961-b390-11e2c231ac6f" (UID: "d04bdc1e-390d-4961-b390-11e2c231ac6f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.532418 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "d04bdc1e-390d-4961-b390-11e2c231ac6f" (UID: "d04bdc1e-390d-4961-b390-11e2c231ac6f"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.555638 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-config-data" (OuterVolumeSpecName: "config-data") pod "d04bdc1e-390d-4961-b390-11e2c231ac6f" (UID: "d04bdc1e-390d-4961-b390-11e2c231ac6f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.576259 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "d04bdc1e-390d-4961-b390-11e2c231ac6f" (UID: "d04bdc1e-390d-4961-b390-11e2c231ac6f"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.598748 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85hxn\" (UniqueName: \"kubernetes.io/projected/d04bdc1e-390d-4961-b390-11e2c231ac6f-kube-api-access-85hxn\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.598793 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.598804 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d04bdc1e-390d-4961-b390-11e2c231ac6f-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.598813 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.598828 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.598836 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/d04bdc1e-390d-4961-b390-11e2c231ac6f-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.921221 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-1" event={"ID":"d04bdc1e-390d-4961-b390-11e2c231ac6f","Type":"ContainerDied","Data":"ac1615633b910ffa0fa083c137fe29228911100d099ba8139f38b6fc2fb94725"} Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.921674 4925 scope.go:117] "RemoveContainer" containerID="14d4d8f75206108171a3dc61b278b2716cbbb682bff74720d220bc4d5c2635cc" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 
11:38:14.921868 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-1" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.943776 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-api-0" event={"ID":"0d0d0281-6343-43b5-ad81-d89705b152c3","Type":"ContainerDied","Data":"c5cac62139e8d35e02edd9389c4508fb390815c6334843c7ba243b17b34b80fc"} Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.943888 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-api-0" Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.969820 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:38:14 crc kubenswrapper[4925]: I0121 11:38:14.974937 4925 scope.go:117] "RemoveContainer" containerID="4068046aaf7384e049003d035c85660c3eb4a53686ca50706418bd92f4655e23" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.069581 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-1"] Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.092192 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.103746 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-api-0"] Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.104915 4925 scope.go:117] "RemoveContainer" containerID="becd0b68b11e43453993805013e803c8dc4c727c9b78abbb7411b7d2ed77f83d" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.132759 4925 scope.go:117] "RemoveContainer" containerID="ee7d42e131a549068952f99ae2a3e67eac5e5aaefc50ec9999f974cc9fd8db49" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.532216 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" path="/var/lib/kubelet/pods/0d0d0281-6343-43b5-ad81-d89705b152c3/volumes" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.535004 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69239153-8b79-477f-8b8b-22e84b28872e" path="/var/lib/kubelet/pods/69239153-8b79-477f-8b8b-22e84b28872e/volumes" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.535726 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" path="/var/lib/kubelet/pods/d04bdc1e-390d-4961-b390-11e2c231ac6f/volumes" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.596186 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.768480 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-645w6\" (UniqueName: \"kubernetes.io/projected/7778e713-1317-48da-a126-8110565993cf-kube-api-access-645w6\") pod \"7778e713-1317-48da-a126-8110565993cf\" (UID: \"7778e713-1317-48da-a126-8110565993cf\") " Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.768602 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7778e713-1317-48da-a126-8110565993cf-operator-scripts\") pod \"7778e713-1317-48da-a126-8110565993cf\" (UID: \"7778e713-1317-48da-a126-8110565993cf\") " Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.769693 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7778e713-1317-48da-a126-8110565993cf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7778e713-1317-48da-a126-8110565993cf" (UID: "7778e713-1317-48da-a126-8110565993cf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.779564 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7778e713-1317-48da-a126-8110565993cf-kube-api-access-645w6" (OuterVolumeSpecName: "kube-api-access-645w6") pod "7778e713-1317-48da-a126-8110565993cf" (UID: "7778e713-1317-48da-a126-8110565993cf"). InnerVolumeSpecName "kube-api-access-645w6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.927900 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-645w6\" (UniqueName: \"kubernetes.io/projected/7778e713-1317-48da-a126-8110565993cf-kube-api-access-645w6\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.927931 4925 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7778e713-1317-48da-a126-8110565993cf-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.955699 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" event={"ID":"7778e713-1317-48da-a126-8110565993cf","Type":"ContainerDied","Data":"42972bcaa13a182ad99874790294ee3edd03c9b2b7169df64fe1d829d85d859d"} Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.955778 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42972bcaa13a182ad99874790294ee3edd03c9b2b7169df64fe1d829d85d859d" Jan 21 11:38:15 crc kubenswrapper[4925]: I0121 11:38:15.955744 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watchertest-account-delete-lbzrk" Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.016514 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.017115 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="ceilometer-central-agent" containerID="cri-o://0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02" gracePeriod=30 Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.017413 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="ceilometer-notification-agent" containerID="cri-o://88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2" gracePeriod=30 Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.017631 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="proxy-httpd" containerID="cri-o://26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62" gracePeriod=30 Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.017691 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="watcher-kuttl-default/ceilometer-0" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="sg-core" containerID="cri-o://306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634" gracePeriod=30 Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.254884 4925 prober.go:107] "Probe failed" probeType="Readiness" pod="watcher-kuttl-default/ceilometer-0" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.232:3000/\": read tcp 10.217.0.2:32926->10.217.0.232:3000: read: connection reset by peer" Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.971244 4925 generic.go:334] "Generic (PLEG): container finished" podID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerID="26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62" exitCode=0 Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.971287 4925 generic.go:334] "Generic (PLEG): container finished" podID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerID="306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634" exitCode=2 Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.971297 4925 generic.go:334] "Generic (PLEG): container finished" podID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerID="0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02" exitCode=0 Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.971342 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerDied","Data":"26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62"} Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.971440 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerDied","Data":"306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634"} Jan 21 11:38:16 crc kubenswrapper[4925]: I0121 11:38:16.971457 4925 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerDied","Data":"0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02"} Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.629347 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.754475 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-sg-core-conf-yaml\") pod \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.754585 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-run-httpd\") pod \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.754617 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-scripts\") pod \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.754687 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-combined-ca-bundle\") pod \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.754792 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-config-data\") pod \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.754827 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-ceilometer-tls-certs\") pod \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.754894 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8l7x\" (UniqueName: \"kubernetes.io/projected/51a8baf6-0755-458f-8c18-0e9bf4892ccd-kube-api-access-q8l7x\") pod \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.754947 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-log-httpd\") pod \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\" (UID: \"51a8baf6-0755-458f-8c18-0e9bf4892ccd\") " Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.758359 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "51a8baf6-0755-458f-8c18-0e9bf4892ccd" (UID: "51a8baf6-0755-458f-8c18-0e9bf4892ccd"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.759720 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "51a8baf6-0755-458f-8c18-0e9bf4892ccd" (UID: "51a8baf6-0755-458f-8c18-0e9bf4892ccd"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.761490 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-scripts" (OuterVolumeSpecName: "scripts") pod "51a8baf6-0755-458f-8c18-0e9bf4892ccd" (UID: "51a8baf6-0755-458f-8c18-0e9bf4892ccd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.761909 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51a8baf6-0755-458f-8c18-0e9bf4892ccd-kube-api-access-q8l7x" (OuterVolumeSpecName: "kube-api-access-q8l7x") pod "51a8baf6-0755-458f-8c18-0e9bf4892ccd" (UID: "51a8baf6-0755-458f-8c18-0e9bf4892ccd"). InnerVolumeSpecName "kube-api-access-q8l7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.791870 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "51a8baf6-0755-458f-8c18-0e9bf4892ccd" (UID: "51a8baf6-0755-458f-8c18-0e9bf4892ccd"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.811857 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "51a8baf6-0755-458f-8c18-0e9bf4892ccd" (UID: "51a8baf6-0755-458f-8c18-0e9bf4892ccd"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.834596 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "51a8baf6-0755-458f-8c18-0e9bf4892ccd" (UID: "51a8baf6-0755-458f-8c18-0e9bf4892ccd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.856939 4925 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.856977 4925 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.856987 4925 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.856996 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.857005 4925 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.857014 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8l7x\" (UniqueName: \"kubernetes.io/projected/51a8baf6-0755-458f-8c18-0e9bf4892ccd-kube-api-access-q8l7x\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.857022 4925 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51a8baf6-0755-458f-8c18-0e9bf4892ccd-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.857912 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-config-data" (OuterVolumeSpecName: "config-data") pod "51a8baf6-0755-458f-8c18-0e9bf4892ccd" (UID: "51a8baf6-0755-458f-8c18-0e9bf4892ccd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.958329 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51a8baf6-0755-458f-8c18-0e9bf4892ccd-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.985526 4925 generic.go:334] "Generic (PLEG): container finished" podID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerID="88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2" exitCode=0 Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.985580 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerDied","Data":"88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2"} Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.985606 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.985623 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"51a8baf6-0755-458f-8c18-0e9bf4892ccd","Type":"ContainerDied","Data":"acadb841afecc880168e486fc3d15472d10400b290254e494950c7184d682fc5"} Jan 21 11:38:17 crc kubenswrapper[4925]: I0121 11:38:17.985647 4925 scope.go:117] "RemoveContainer" containerID="26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.024817 4925 scope.go:117] "RemoveContainer" containerID="306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.036210 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.051876 4925 scope.go:117] "RemoveContainer" containerID="88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.061654 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.083973 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084490 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="ceilometer-notification-agent" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084512 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="ceilometer-notification-agent" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084551 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-api" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084561 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-api" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084615 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="proxy-httpd" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084627 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="proxy-httpd" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084643 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-api" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084651 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-api" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084661 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="sg-core" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084667 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="sg-core" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084676 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7778e713-1317-48da-a126-8110565993cf" 
containerName="mariadb-account-delete" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084686 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="7778e713-1317-48da-a126-8110565993cf" containerName="mariadb-account-delete" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084707 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-kuttl-api-log" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084716 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-kuttl-api-log" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084730 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-kuttl-api-log" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084736 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-kuttl-api-log" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084749 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69239153-8b79-477f-8b8b-22e84b28872e" containerName="watcher-applier" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084755 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="69239153-8b79-477f-8b8b-22e84b28872e" containerName="watcher-applier" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.084765 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="ceilometer-central-agent" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084771 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="ceilometer-central-agent" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084931 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="proxy-httpd" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084961 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="sg-core" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084975 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-api" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.084986 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="ceilometer-central-agent" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.085001 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" containerName="ceilometer-notification-agent" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.085012 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="d04bdc1e-390d-4961-b390-11e2c231ac6f" containerName="watcher-kuttl-api-log" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.085024 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="69239153-8b79-477f-8b8b-22e84b28872e" containerName="watcher-applier" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.085035 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-kuttl-api-log" Jan 21 11:38:18 crc kubenswrapper[4925]: 
I0121 11:38:18.085047 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d0d0281-6343-43b5-ad81-d89705b152c3" containerName="watcher-api" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.085057 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="7778e713-1317-48da-a126-8110565993cf" containerName="mariadb-account-delete" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.086907 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.094291 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.102464 4925 scope.go:117] "RemoveContainer" containerID="0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.102793 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-config-data" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.103070 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"ceilometer-scripts" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.103214 4925 reflector.go:368] Caches populated for *v1.Secret from object-"watcher-kuttl-default"/"cert-ceilometer-internal-svc" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.137634 4925 scope.go:117] "RemoveContainer" containerID="26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.138474 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62\": container with ID starting with 26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62 not found: ID does not exist" containerID="26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.138546 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62"} err="failed to get container status \"26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62\": rpc error: code = NotFound desc = could not find container \"26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62\": container with ID starting with 26529ea9e1fa70055aa92948ebb966432959fb428dd115626cfee4dc091dee62 not found: ID does not exist" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.138592 4925 scope.go:117] "RemoveContainer" containerID="306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.139155 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634\": container with ID starting with 306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634 not found: ID does not exist" containerID="306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.139192 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634"} err="failed to 
get container status \"306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634\": rpc error: code = NotFound desc = could not find container \"306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634\": container with ID starting with 306e84f5024b072d895b44bc879703c2009987a225d90b2f49abb972cb767634 not found: ID does not exist" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.139213 4925 scope.go:117] "RemoveContainer" containerID="88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.139824 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2\": container with ID starting with 88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2 not found: ID does not exist" containerID="88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.139857 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2"} err="failed to get container status \"88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2\": rpc error: code = NotFound desc = could not find container \"88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2\": container with ID starting with 88b2684138c5728d231aeb0ebc04b5f66090e21c68c49e54211520347799d6e2 not found: ID does not exist" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.139880 4925 scope.go:117] "RemoveContainer" containerID="0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02" Jan 21 11:38:18 crc kubenswrapper[4925]: E0121 11:38:18.140279 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02\": container with ID starting with 0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02 not found: ID does not exist" containerID="0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.140318 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02"} err="failed to get container status \"0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02\": rpc error: code = NotFound desc = could not find container \"0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02\": container with ID starting with 0e889f1eeea03722988b3b03e2370f52439eda275ba484852bec32cf340f1e02 not found: ID does not exist" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.263012 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.263069 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.263107 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-config-data\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.263160 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-scripts\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.263756 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.263991 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb3be111-53a6-456c-8c50-bf9b7c6cb367-log-httpd\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.264038 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb3be111-53a6-456c-8c50-bf9b7c6cb367-run-httpd\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.264068 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69pd8\" (UniqueName: \"kubernetes.io/projected/eb3be111-53a6-456c-8c50-bf9b7c6cb367-kube-api-access-69pd8\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.365448 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.365520 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.365563 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-config-data\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 
11:38:18.365620 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-scripts\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.365683 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.365776 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb3be111-53a6-456c-8c50-bf9b7c6cb367-log-httpd\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.365812 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb3be111-53a6-456c-8c50-bf9b7c6cb367-run-httpd\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.365835 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69pd8\" (UniqueName: \"kubernetes.io/projected/eb3be111-53a6-456c-8c50-bf9b7c6cb367-kube-api-access-69pd8\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.366674 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb3be111-53a6-456c-8c50-bf9b7c6cb367-run-httpd\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.366754 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb3be111-53a6-456c-8c50-bf9b7c6cb367-log-httpd\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.370340 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.371594 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.371656 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-config-data\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " 
pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.378276 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.382252 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb3be111-53a6-456c-8c50-bf9b7c6cb367-scripts\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.390126 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69pd8\" (UniqueName: \"kubernetes.io/projected/eb3be111-53a6-456c-8c50-bf9b7c6cb367-kube-api-access-69pd8\") pod \"ceilometer-0\" (UID: \"eb3be111-53a6-456c-8c50-bf9b7c6cb367\") " pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.422309 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:18 crc kubenswrapper[4925]: I0121 11:38:18.934145 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/ceilometer-0"] Jan 21 11:38:19 crc kubenswrapper[4925]: I0121 11:38:19.128975 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"eb3be111-53a6-456c-8c50-bf9b7c6cb367","Type":"ContainerStarted","Data":"05ee7b7d7510c0476affea77869620339ac8807d44bf0c1c2f7a253f8d0b8052"} Jan 21 11:38:19 crc kubenswrapper[4925]: I0121 11:38:19.539097 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51a8baf6-0755-458f-8c18-0e9bf4892ccd" path="/var/lib/kubelet/pods/51a8baf6-0755-458f-8c18-0e9bf4892ccd/volumes" Jan 21 11:38:20 crc kubenswrapper[4925]: I0121 11:38:20.153171 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"eb3be111-53a6-456c-8c50-bf9b7c6cb367","Type":"ContainerStarted","Data":"12787dd76b8bbdcf51c665f84c295658eaf05b979657ecee2dd3ce29528ca0ed"} Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.187224 4925 generic.go:334] "Generic (PLEG): container finished" podID="2a70b13c-179d-4bed-a69d-3144d4a91e6f" containerID="ac43d46300e53edea6ccffa6a9666bd2095fb033d4c3222f87f23d95e241ca79" exitCode=0 Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.187606 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"2a70b13c-179d-4bed-a69d-3144d4a91e6f","Type":"ContainerDied","Data":"ac43d46300e53edea6ccffa6a9666bd2095fb033d4c3222f87f23d95e241ca79"} Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.188558 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watchertest-account-delete-lbzrk"] Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.198633 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"eb3be111-53a6-456c-8c50-bf9b7c6cb367","Type":"ContainerStarted","Data":"13d4f7f218c5e2e52f35efb8f58bc0d4bb2944d6668dc3491f2e4c062996b1d1"} Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.211451 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["watcher-kuttl-default/watchertest-account-delete-lbzrk"] Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.227003 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-db-create-wn9kc"] Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.247132 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-db-create-wn9kc"] Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.251420 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-update-qp7xf"] Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.262078 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-test-account-create-update-qp7xf"] Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.515657 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f6f16d9-3059-4cf4-b7b3-14668f16677e" path="/var/lib/kubelet/pods/2f6f16d9-3059-4cf4-b7b3-14668f16677e/volumes" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.516717 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7778e713-1317-48da-a126-8110565993cf" path="/var/lib/kubelet/pods/7778e713-1317-48da-a126-8110565993cf/volumes" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.517322 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c364fa54-d06f-486a-ba50-823ac05c6a41" path="/var/lib/kubelet/pods/c364fa54-d06f-486a-ba50-823ac05c6a41/volumes" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.607473 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.794140 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a70b13c-179d-4bed-a69d-3144d4a91e6f-logs\") pod \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.794203 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-custom-prometheus-ca\") pod \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.794260 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-cert-memcached-mtls\") pod \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.794336 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-config-data\") pod \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.794500 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fldt4\" (UniqueName: \"kubernetes.io/projected/2a70b13c-179d-4bed-a69d-3144d4a91e6f-kube-api-access-fldt4\") pod \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 
11:38:21.794561 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-combined-ca-bundle\") pod \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\" (UID: \"2a70b13c-179d-4bed-a69d-3144d4a91e6f\") " Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.794815 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a70b13c-179d-4bed-a69d-3144d4a91e6f-logs" (OuterVolumeSpecName: "logs") pod "2a70b13c-179d-4bed-a69d-3144d4a91e6f" (UID: "2a70b13c-179d-4bed-a69d-3144d4a91e6f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.795530 4925 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a70b13c-179d-4bed-a69d-3144d4a91e6f-logs\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.802148 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a70b13c-179d-4bed-a69d-3144d4a91e6f-kube-api-access-fldt4" (OuterVolumeSpecName: "kube-api-access-fldt4") pod "2a70b13c-179d-4bed-a69d-3144d4a91e6f" (UID: "2a70b13c-179d-4bed-a69d-3144d4a91e6f"). InnerVolumeSpecName "kube-api-access-fldt4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.832669 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "2a70b13c-179d-4bed-a69d-3144d4a91e6f" (UID: "2a70b13c-179d-4bed-a69d-3144d4a91e6f"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.847544 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a70b13c-179d-4bed-a69d-3144d4a91e6f" (UID: "2a70b13c-179d-4bed-a69d-3144d4a91e6f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.857599 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-config-data" (OuterVolumeSpecName: "config-data") pod "2a70b13c-179d-4bed-a69d-3144d4a91e6f" (UID: "2a70b13c-179d-4bed-a69d-3144d4a91e6f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.897758 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fldt4\" (UniqueName: \"kubernetes.io/projected/2a70b13c-179d-4bed-a69d-3144d4a91e6f-kube-api-access-fldt4\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.897799 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.897808 4925 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.897820 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.913553 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "2a70b13c-179d-4bed-a69d-3144d4a91e6f" (UID: "2a70b13c-179d-4bed-a69d-3144d4a91e6f"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:38:21 crc kubenswrapper[4925]: I0121 11:38:21.999294 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/2a70b13c-179d-4bed-a69d-3144d4a91e6f-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 11:38:22 crc kubenswrapper[4925]: I0121 11:38:22.210766 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"eb3be111-53a6-456c-8c50-bf9b7c6cb367","Type":"ContainerStarted","Data":"1ae5de1b0c89ebbc3116768c2064920a96f5a6306c8dd25d120161119ff3b070"} Jan 21 11:38:22 crc kubenswrapper[4925]: I0121 11:38:22.213310 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" event={"ID":"2a70b13c-179d-4bed-a69d-3144d4a91e6f","Type":"ContainerDied","Data":"30892169e00d8a307fd74a114b0c6fd8bca556e8baca25b9584e22d217745239"} Jan 21 11:38:22 crc kubenswrapper[4925]: I0121 11:38:22.213374 4925 scope.go:117] "RemoveContainer" containerID="ac43d46300e53edea6ccffa6a9666bd2095fb033d4c3222f87f23d95e241ca79" Jan 21 11:38:22 crc kubenswrapper[4925]: I0121 11:38:22.213611 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/watcher-kuttl-decision-engine-0" Jan 21 11:38:22 crc kubenswrapper[4925]: I0121 11:38:22.267107 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:38:22 crc kubenswrapper[4925]: I0121 11:38:22.275641 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["watcher-kuttl-default/watcher-kuttl-decision-engine-0"] Jan 21 11:38:23 crc kubenswrapper[4925]: I0121 11:38:23.228872 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/ceilometer-0" event={"ID":"eb3be111-53a6-456c-8c50-bf9b7c6cb367","Type":"ContainerStarted","Data":"0312c8a402b5032b802d5c46705a816134044098876fea0ccbae3e30557e79a1"} Jan 21 11:38:23 crc kubenswrapper[4925]: I0121 11:38:23.229560 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:23 crc kubenswrapper[4925]: I0121 11:38:23.512303 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a70b13c-179d-4bed-a69d-3144d4a91e6f" path="/var/lib/kubelet/pods/2a70b13c-179d-4bed-a69d-3144d4a91e6f/volumes" Jan 21 11:38:26 crc kubenswrapper[4925]: I0121 11:38:26.502105 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:38:26 crc kubenswrapper[4925]: E0121 11:38:26.502758 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:38:31 crc kubenswrapper[4925]: I0121 11:38:31.303387 4925 scope.go:117] "RemoveContainer" containerID="3c2983054a582e5906ee1dc2d463270d5208b3ae3c645a4f34ed5d3c0833ebfb" Jan 21 11:38:31 crc kubenswrapper[4925]: I0121 11:38:31.335029 4925 scope.go:117] "RemoveContainer" containerID="0e6ce034b453fc8c600dbcedeb85d10818cb413a9c7d1e6c8b02f2d84095e404" Jan 21 11:38:31 crc kubenswrapper[4925]: I0121 11:38:31.377756 4925 scope.go:117] "RemoveContainer" containerID="453fd643287e1d40e75803b12cb52f373a0fbdb54e4ddf5bab744f3a83186ee7" Jan 21 11:38:31 crc kubenswrapper[4925]: I0121 11:38:31.417269 4925 scope.go:117] "RemoveContainer" containerID="9304c06a63c95329cfd0850ab0a95543341e741c4dfd34929e328f10293c90dd" Jan 21 11:38:31 crc kubenswrapper[4925]: I0121 11:38:31.489032 4925 scope.go:117] "RemoveContainer" containerID="fc4bf639600371f091d6ac33c9e259f8a399354a4c89180ca78de547e96f49d4" Jan 21 11:38:31 crc kubenswrapper[4925]: I0121 11:38:31.550118 4925 scope.go:117] "RemoveContainer" containerID="d900d011de833f5bd19fa9a34cdd7a53efcd435de1eee98f99f5bb72b1b7033e" Jan 21 11:38:31 crc kubenswrapper[4925]: I0121 11:38:31.587764 4925 scope.go:117] "RemoveContainer" containerID="090b14403ece320980b1a8c9c6dc2e68704b7cd51811cb33c049746c3b9e8c7a" Jan 21 11:38:31 crc kubenswrapper[4925]: I0121 11:38:31.636843 4925 scope.go:117] "RemoveContainer" containerID="0704954ae6a2f2c6582908b25b084a33b6916e4418a9e6b9a54be7ea13cd3683" Jan 21 11:38:40 crc kubenswrapper[4925]: I0121 11:38:40.502624 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:38:40 crc kubenswrapper[4925]: E0121 11:38:40.503412 4925 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.388519 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="watcher-kuttl-default/ceilometer-0" podStartSLOduration=25.944451864 podStartE2EDuration="29.388482861s" podCreationTimestamp="2026-01-21 11:38:18 +0000 UTC" firstStartedPulling="2026-01-21 11:38:18.936286081 +0000 UTC m=+2590.540178015" lastFinishedPulling="2026-01-21 11:38:22.380317078 +0000 UTC m=+2593.984209012" observedRunningTime="2026-01-21 11:38:23.357990314 +0000 UTC m=+2594.961882248" watchObservedRunningTime="2026-01-21 11:38:47.388482861 +0000 UTC m=+2618.992374795" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.397413 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-tws8s/must-gather-q7jxq"] Jan 21 11:38:47 crc kubenswrapper[4925]: E0121 11:38:47.397911 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a70b13c-179d-4bed-a69d-3144d4a91e6f" containerName="watcher-decision-engine" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.397939 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a70b13c-179d-4bed-a69d-3144d4a91e6f" containerName="watcher-decision-engine" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.398174 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a70b13c-179d-4bed-a69d-3144d4a91e6f" containerName="watcher-decision-engine" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.399518 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tws8s/must-gather-q7jxq" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.404972 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-tws8s"/"openshift-service-ca.crt" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.405073 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-tws8s"/"default-dockercfg-7s4lw" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.405097 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-tws8s"/"kube-root-ca.crt" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.412993 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-tws8s/must-gather-q7jxq"] Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.687721 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54ttg\" (UniqueName: \"kubernetes.io/projected/4f4d2099-5115-4065-971b-e51c63e6ee05-kube-api-access-54ttg\") pod \"must-gather-q7jxq\" (UID: \"4f4d2099-5115-4065-971b-e51c63e6ee05\") " pod="openshift-must-gather-tws8s/must-gather-q7jxq" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.687930 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4f4d2099-5115-4065-971b-e51c63e6ee05-must-gather-output\") pod \"must-gather-q7jxq\" (UID: \"4f4d2099-5115-4065-971b-e51c63e6ee05\") " pod="openshift-must-gather-tws8s/must-gather-q7jxq" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.789694 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4f4d2099-5115-4065-971b-e51c63e6ee05-must-gather-output\") pod \"must-gather-q7jxq\" (UID: \"4f4d2099-5115-4065-971b-e51c63e6ee05\") " pod="openshift-must-gather-tws8s/must-gather-q7jxq" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.789829 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54ttg\" (UniqueName: \"kubernetes.io/projected/4f4d2099-5115-4065-971b-e51c63e6ee05-kube-api-access-54ttg\") pod \"must-gather-q7jxq\" (UID: \"4f4d2099-5115-4065-971b-e51c63e6ee05\") " pod="openshift-must-gather-tws8s/must-gather-q7jxq" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.790293 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4f4d2099-5115-4065-971b-e51c63e6ee05-must-gather-output\") pod \"must-gather-q7jxq\" (UID: \"4f4d2099-5115-4065-971b-e51c63e6ee05\") " pod="openshift-must-gather-tws8s/must-gather-q7jxq" Jan 21 11:38:47 crc kubenswrapper[4925]: I0121 11:38:47.820515 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54ttg\" (UniqueName: \"kubernetes.io/projected/4f4d2099-5115-4065-971b-e51c63e6ee05-kube-api-access-54ttg\") pod \"must-gather-q7jxq\" (UID: \"4f4d2099-5115-4065-971b-e51c63e6ee05\") " pod="openshift-must-gather-tws8s/must-gather-q7jxq" Jan 21 11:38:48 crc kubenswrapper[4925]: I0121 11:38:48.031653 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tws8s/must-gather-q7jxq" Jan 21 11:38:48 crc kubenswrapper[4925]: I0121 11:38:48.470050 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="watcher-kuttl-default/ceilometer-0" Jan 21 11:38:48 crc kubenswrapper[4925]: I0121 11:38:48.726313 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-tws8s/must-gather-q7jxq"] Jan 21 11:38:49 crc kubenswrapper[4925]: I0121 11:38:49.518609 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tws8s/must-gather-q7jxq" event={"ID":"4f4d2099-5115-4065-971b-e51c63e6ee05","Type":"ContainerStarted","Data":"914760ddc2d4e2aa0e7cbb373a6d0c68a5a9fbd9f96ed110d96d6e8640f41951"} Jan 21 11:38:51 crc kubenswrapper[4925]: I0121 11:38:51.567145 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:38:51 crc kubenswrapper[4925]: E0121 11:38:51.568230 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:39:01 crc kubenswrapper[4925]: I0121 11:39:01.852057 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tws8s/must-gather-q7jxq" event={"ID":"4f4d2099-5115-4065-971b-e51c63e6ee05","Type":"ContainerStarted","Data":"6dbd703a96d4faab608ab81d083ca4dadc734706aa845c5b1fbf21ab878969be"} Jan 21 11:39:01 crc kubenswrapper[4925]: I0121 11:39:01.852813 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tws8s/must-gather-q7jxq" event={"ID":"4f4d2099-5115-4065-971b-e51c63e6ee05","Type":"ContainerStarted","Data":"d80287b33e63800cf0de16d98071d589ef013897870c08cff6fe0316d90dc1a2"} Jan 21 11:39:02 crc kubenswrapper[4925]: I0121 11:39:02.502183 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:39:02 crc kubenswrapper[4925]: E0121 11:39:02.502556 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:39:17 crc kubenswrapper[4925]: I0121 11:39:17.502263 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:39:17 crc kubenswrapper[4925]: E0121 11:39:17.503209 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:39:29 crc kubenswrapper[4925]: I0121 11:39:29.417716 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_controller-6968d8fdc4-q24bt_66dc8772-25c5-4ad1-b0fa-6981e3158ad5/controller/0.log" Jan 21 11:39:29 crc kubenswrapper[4925]: I0121 11:39:29.434433 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q24bt_66dc8772-25c5-4ad1-b0fa-6981e3158ad5/kube-rbac-proxy/0.log" Jan 21 11:39:29 crc kubenswrapper[4925]: I0121 11:39:29.455358 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/controller/0.log" Jan 21 11:39:30 crc kubenswrapper[4925]: I0121 11:39:30.630479 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:39:30 crc kubenswrapper[4925]: E0121 11:39:30.631509 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.851209 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/frr/0.log" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.869456 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/reloader/0.log" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.885298 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/frr-metrics/0.log" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.899566 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/kube-rbac-proxy/0.log" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.915965 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/kube-rbac-proxy-frr/0.log" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.916610 4925 scope.go:117] "RemoveContainer" containerID="fea557108ee2880b6213b3ae9c90d1619bfae757a0ab40c09b3b4dade764862a" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.945839 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-frr-files/0.log" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.951039 4925 scope.go:117] "RemoveContainer" containerID="b3f2c97406c139215c34b870a0b526a4e41dc1fda993e0ab1b4230292a5f5dce" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.965033 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-reloader/0.log" Jan 21 11:39:31 crc kubenswrapper[4925]: I0121 11:39:31.986186 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-metrics/0.log" Jan 21 11:39:32 crc kubenswrapper[4925]: I0121 11:39:32.005683 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-9d94c_0b7695ad-2b58-4be9-911d-bc83bece0db7/frr-k8s-webhook-server/0.log" Jan 21 11:39:32 crc 
kubenswrapper[4925]: I0121 11:39:32.034214 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-57547767ff-zrxjk_1f8eea58-9366-4bb1-a9d2-dc8842674dc2/manager/0.log" Jan 21 11:39:32 crc kubenswrapper[4925]: I0121 11:39:32.050510 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5b9dd8b59d-59895_955477b1-b9f0-41a2-aa5b-2e2f47495422/webhook-server/0.log" Jan 21 11:39:32 crc kubenswrapper[4925]: I0121 11:39:32.352234 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zxq6z_0ac5019d-ffb4-4cb6-9042-1b983b15841a/speaker/0.log" Jan 21 11:39:32 crc kubenswrapper[4925]: I0121 11:39:32.359717 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zxq6z_0ac5019d-ffb4-4cb6-9042-1b983b15841a/kube-rbac-proxy/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.464499 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/extract/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.489998 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/util/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.498022 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/pull/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.514752 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/extract/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.524801 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/util/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.538635 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/pull/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.568412 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-r4klh_d8031329-a6ad-49da-881e-94db9f545ab7/manager/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.613116 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-vmg65_cf77bf31-5d25-4015-b274-05dbedbedf5a/manager/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.627247 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mjg4d_50c322c0-a941-48fa-bf86-c2daa64a9aa8/manager/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.642529 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-ggpw9_e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23/manager/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.652361 4925 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-h9szq_9b9f5cfa-93e1-4940-b7f0-066c6bc4f194/manager/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.666678 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-gcxp4_c4be49a0-e872-456f-a102-928f5210524f/manager/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.894029 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-dqjpf_dbe9a043-a969-429b-b7b1-33d12296c52c/manager/0.log" Jan 21 11:39:35 crc kubenswrapper[4925]: I0121 11:39:35.921261 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-wdwvl_2d8c2e69-7444-465a-a418-59d9c5b20074/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.065711 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-2znsh_2c47ce4c-9012-4798-9bf8-127a96ad285e/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.088127 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-x7474_fc8ec38e-f941-4ba0-863e-933e10bf2043/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.144101 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-b4cd2_6d27cfd1-683a-4e92-bcaf-40f1f370cd1b/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.153470 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-t9fng_398ea514-c4f3-40db-8421-ebf007fda30d/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.167694 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-nqldj_a7dd34dc-8a69-4c91-88ec-d1d7beffb15d/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.178919 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-44xwf_cc5d8922-f54d-42a1-b23a-622329e3f644/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.196838 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9_05db7c08-87f6-4518-8d61-c87cbf0b1735/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.869974 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-87d6d564b-dgm28_be80c7ef-4f5f-4660-9954-5ab5b34655cf/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.880318 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6jmrz_25cc6d46-b21a-463f-a13a-9874780c87f3/registry-server/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.901164 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-l9f98_a032309d-2543-4e6b-8207-d8097dffcaf5/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.915374 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-84spn_0fb89ff9-2ba9-4a38-b739-43fa22a5b209/manager/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.933814 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-k7r2f_182d9a34-f024-4a86-8851-9e20d654f4ac/operator/0.log" Jan 21 11:39:36 crc kubenswrapper[4925]: I0121 11:39:36.943853 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-cq4k9_a9c52af6-912a-4e93-bbcd-42e961453471/manager/0.log" Jan 21 11:39:39 crc kubenswrapper[4925]: I0121 11:39:39.071632 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-gcwbr_d7429a44-6eeb-419b-8193-29275baf4ad9/manager/0.log" Jan 21 11:39:39 crc kubenswrapper[4925]: I0121 11:39:39.091519 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-hvtnz_fd15c43d-a647-467e-a4f1-eb0ca81a123f/manager/0.log" Jan 21 11:39:39 crc kubenswrapper[4925]: I0121 11:39:39.557816 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-696df99475-8gncw_d8433174-98d9-44ec-924f-6fe639538b64/manager/0.log" Jan 21 11:39:39 crc kubenswrapper[4925]: I0121 11:39:39.576215 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-index-lvk6z_2ac527d2-36c7-40bc-ae76-8d007f3dadb3/registry-server/0.log" Jan 21 11:39:41 crc kubenswrapper[4925]: I0121 11:39:41.502680 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:39:41 crc kubenswrapper[4925]: E0121 11:39:41.503018 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:39:44 crc kubenswrapper[4925]: I0121 11:39:44.124178 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-2fd99_5c2fa6a9-ee76-4308-a8f1-095d9720c688/control-plane-machine-set-operator/0.log" Jan 21 11:39:44 crc kubenswrapper[4925]: I0121 11:39:44.142093 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bmpxp_5a264bb6-3e63-4411-b0a4-95be21527653/kube-rbac-proxy/0.log" Jan 21 11:39:44 crc kubenswrapper[4925]: I0121 11:39:44.150741 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bmpxp_5a264bb6-3e63-4411-b0a4-95be21527653/machine-api-operator/0.log" Jan 21 11:39:50 crc kubenswrapper[4925]: I0121 11:39:50.639031 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-946sh_d5dc9762-e122-475f-a1a2-2d9711313716/cert-manager-controller/0.log" Jan 21 11:39:50 crc kubenswrapper[4925]: I0121 11:39:50.661538 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-gl7pg_1ae434a0-174f-4c93-bf2c-ab2091c54e6c/cert-manager-cainjector/0.log" Jan 21 11:39:50 crc kubenswrapper[4925]: I0121 11:39:50.672278 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-9vbqj_d2372e44-af17-4c55-9a11-67fb28adcc08/cert-manager-webhook/0.log" Jan 21 11:39:55 crc kubenswrapper[4925]: I0121 11:39:55.506282 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:39:55 crc kubenswrapper[4925]: E0121 11:39:55.507136 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:39:57 crc kubenswrapper[4925]: I0121 11:39:57.281750 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-58tbd_eac14392-e11f-4bf9-b1db-d6200c0d0821/nmstate-console-plugin/0.log" Jan 21 11:39:57 crc kubenswrapper[4925]: I0121 11:39:57.312465 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-j7spk_a3532c91-ec04-4a0c-99d3-bf6bf96a8887/nmstate-handler/0.log" Jan 21 11:39:57 crc kubenswrapper[4925]: I0121 11:39:57.325631 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-c5llf_e125e9a3-31e1-47ce-ab99-c65ace2a60ec/nmstate-metrics/0.log" Jan 21 11:39:57 crc kubenswrapper[4925]: I0121 11:39:57.340815 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-c5llf_e125e9a3-31e1-47ce-ab99-c65ace2a60ec/kube-rbac-proxy/0.log" Jan 21 11:39:57 crc kubenswrapper[4925]: I0121 11:39:57.381438 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-8kzgs_88c3cb65-2aff-44db-85fb-8c365c93439f/nmstate-operator/0.log" Jan 21 11:39:57 crc kubenswrapper[4925]: I0121 11:39:57.391177 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-87q5g_3c739657-6960-46fe-be71-6d965b98e714/nmstate-webhook/0.log" Jan 21 11:40:04 crc kubenswrapper[4925]: I0121 11:40:04.785136 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-x48ml_5331ad9e-1914-414a-a7b2-b52eb191ba2f/prometheus-operator/0.log" Jan 21 11:40:04 crc kubenswrapper[4925]: I0121 11:40:04.797602 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_28aa0136-6b61-4a88-907d-265c48e36f08/prometheus-operator-admission-webhook/0.log" Jan 21 11:40:04 crc kubenswrapper[4925]: I0121 11:40:04.881860 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_ea8b2f0b-f77a-4737-be37-3268437871d9/prometheus-operator-admission-webhook/0.log" Jan 21 11:40:04 crc kubenswrapper[4925]: I0121 11:40:04.924471 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-jvt9p_e052dc8b-2520-4757-bb0a-d1350ad44b08/operator/0.log" Jan 21 11:40:04 crc kubenswrapper[4925]: I0121 11:40:04.948171 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-mmj2s_a5dc6045-7192-42dc-b653-a71b80a9f119/observability-ui-dashboards/0.log" Jan 21 11:40:04 crc kubenswrapper[4925]: I0121 11:40:04.968894 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-655nk_0d031a33-73a8-45d7-9979-e1266d9e7be7/perses-operator/0.log" Jan 21 11:40:07 crc kubenswrapper[4925]: I0121 11:40:07.503326 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:40:07 crc kubenswrapper[4925]: E0121 11:40:07.504123 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:40:13 crc kubenswrapper[4925]: I0121 11:40:13.043534 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q24bt_66dc8772-25c5-4ad1-b0fa-6981e3158ad5/controller/0.log" Jan 21 11:40:13 crc kubenswrapper[4925]: I0121 11:40:13.055500 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q24bt_66dc8772-25c5-4ad1-b0fa-6981e3158ad5/kube-rbac-proxy/0.log" Jan 21 11:40:13 crc kubenswrapper[4925]: I0121 11:40:13.098378 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/controller/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.456374 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/frr/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.486709 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/reloader/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.554748 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/frr-metrics/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.725514 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/kube-rbac-proxy/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.816577 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/kube-rbac-proxy-frr/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.824271 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-frr-files/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.839638 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-reloader/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.851293 4925 log.go:25] "Finished 
parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-metrics/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.863670 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-9d94c_0b7695ad-2b58-4be9-911d-bc83bece0db7/frr-k8s-webhook-server/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.898625 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-57547767ff-zrxjk_1f8eea58-9366-4bb1-a9d2-dc8842674dc2/manager/0.log" Jan 21 11:40:14 crc kubenswrapper[4925]: I0121 11:40:14.921029 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5b9dd8b59d-59895_955477b1-b9f0-41a2-aa5b-2e2f47495422/webhook-server/0.log" Jan 21 11:40:15 crc kubenswrapper[4925]: I0121 11:40:15.183216 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zxq6z_0ac5019d-ffb4-4cb6-9042-1b983b15841a/speaker/0.log" Jan 21 11:40:15 crc kubenswrapper[4925]: I0121 11:40:15.195813 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zxq6z_0ac5019d-ffb4-4cb6-9042-1b983b15841a/kube-rbac-proxy/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.297801 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_alertmanager-metric-storage-0_938ddfe0-198f-4050-af00-6c195ffaa41e/alertmanager/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.306982 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_alertmanager-metric-storage-0_938ddfe0-198f-4050-af00-6c195ffaa41e/config-reloader/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.320661 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_alertmanager-metric-storage-0_938ddfe0-198f-4050-af00-6c195ffaa41e/init-config-reloader/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.384909 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_ceilometer-0_eb3be111-53a6-456c-8c50-bf9b7c6cb367/ceilometer-central-agent/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.406864 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_ceilometer-0_eb3be111-53a6-456c-8c50-bf9b7c6cb367/ceilometer-notification-agent/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.414751 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_ceilometer-0_eb3be111-53a6-456c-8c50-bf9b7c6cb367/sg-core/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.433495 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_ceilometer-0_eb3be111-53a6-456c-8c50-bf9b7c6cb367/proxy-httpd/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.502257 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:40:21 crc kubenswrapper[4925]: E0121 11:40:21.502585 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" 
podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.553944 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_keystone-6cf7c7c58-dd2hr_86449d00-a2ae-4fb9-8529-e5a140d7b2f8/keystone-api/0.log" Jan 21 11:40:21 crc kubenswrapper[4925]: I0121 11:40:21.567073 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_kube-state-metrics-0_36d577b6-db6f-4302-a839-ed148c56f7b6/kube-state-metrics/0.log" Jan 21 11:40:32 crc kubenswrapper[4925]: I0121 11:40:32.061044 4925 scope.go:117] "RemoveContainer" containerID="ca16500b733b5186298e744deaf79e1611286bdda6fd8e8c280cd500a527f15a" Jan 21 11:40:32 crc kubenswrapper[4925]: I0121 11:40:32.113836 4925 scope.go:117] "RemoveContainer" containerID="a1a520a5aa970b1272b144e532e7c9ac0dd70608fa3d7a4bd5a4e76c4a0de0d7" Jan 21 11:40:34 crc kubenswrapper[4925]: I0121 11:40:34.501560 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:40:34 crc kubenswrapper[4925]: E0121 11:40:34.502122 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.622552 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_memcached-0_1a2e82ce-ac09-4cf1-95cd-3c206a2a06ed/memcached/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.648030 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_openstack-galera-0_a76fd86c-08d3-47af-af39-e3336a2f5c0b/galera/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.660994 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_openstack-galera-0_a76fd86c-08d3-47af-af39-e3336a2f5c0b/mysql-bootstrap/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.686513 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_openstackclient_2c73a99a-7a7e-4746-9404-3dc64865ea05/openstackclient/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.704571 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_74f59733-5086-4ebd-9e6a-764a947d38b4/prometheus/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.711754 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_74f59733-5086-4ebd-9e6a-764a947d38b4/config-reloader/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.718978 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_74f59733-5086-4ebd-9e6a-764a947d38b4/thanos-sidecar/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.726760 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_prometheus-metric-storage-0_74f59733-5086-4ebd-9e6a-764a947d38b4/init-config-reloader/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.757927 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/watcher-kuttl-default_rabbitmq-notifications-server-0_4c494924-513c-4575-a9c9-78e15c3751bc/rabbitmq/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.767240 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_rabbitmq-notifications-server-0_4c494924-513c-4575-a9c9-78e15c3751bc/setup-container/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.828426 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_rabbitmq-server-0_b7c93089-4b7c-45c7-aa48-64622e536032/rabbitmq/0.log" Jan 21 11:40:35 crc kubenswrapper[4925]: I0121 11:40:35.834987 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/watcher-kuttl-default_rabbitmq-server-0_b7c93089-4b7c-45c7-aa48-64622e536032/setup-container/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.505964 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm_b4b9e49f-2140-42de-b29f-6241bafc109e/extract/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.536627 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm_b4b9e49f-2140-42de-b29f-6241bafc109e/util/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.573668 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arj2lm_b4b9e49f-2140-42de-b29f-6241bafc109e/pull/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.585574 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76_5dfe3134-6d2f-47d9-b786-f69a4fbcf164/extract/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.597179 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76_5dfe3134-6d2f-47d9-b786-f69a4fbcf164/util/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.607639 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchvg76_5dfe3134-6d2f-47d9-b786-f69a4fbcf164/pull/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.622842 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg_e058b309-ed1b-4162-a8e1-adf175ab47cf/extract/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.632680 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg_e058b309-ed1b-4162-a8e1-adf175ab47cf/util/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.641018 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713jdzzg_e058b309-ed1b-4162-a8e1-adf175ab47cf/pull/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.779210 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh_b8412bf3-79ed-4401-a927-e30a8a770afc/extract/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.793063 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh_b8412bf3-79ed-4401-a927-e30a8a770afc/util/0.log" Jan 21 11:40:44 crc kubenswrapper[4925]: I0121 11:40:44.818159 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08nj8kh_b8412bf3-79ed-4401-a927-e30a8a770afc/pull/0.log" Jan 21 11:40:45 crc kubenswrapper[4925]: I0121 11:40:45.302191 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xwrpb_c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426/registry-server/0.log" Jan 21 11:40:45 crc kubenswrapper[4925]: I0121 11:40:45.308595 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xwrpb_c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426/extract-utilities/0.log" Jan 21 11:40:45 crc kubenswrapper[4925]: I0121 11:40:45.318411 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xwrpb_c14bf9ba-8bf3-4a6d-a15b-0f62bc86d426/extract-content/0.log" Jan 21 11:40:45 crc kubenswrapper[4925]: I0121 11:40:45.523561 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:40:45 crc kubenswrapper[4925]: E0121 11:40:45.523853 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.028636 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xbt62_19b1508e-f4b4-420f-abc7-d2c922cea0fc/registry-server/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.034830 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xbt62_19b1508e-f4b4-420f-abc7-d2c922cea0fc/extract-utilities/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.044257 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xbt62_19b1508e-f4b4-420f-abc7-d2c922cea0fc/extract-content/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.063340 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kzv24_9821d9aa-a481-43fd-a938-98d978d17299/marketplace-operator/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.196867 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bbvfx_d658610f-6e84-446d-9d81-e4e4198a6102/registry-server/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.315072 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bbvfx_d658610f-6e84-446d-9d81-e4e4198a6102/extract-utilities/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.330438 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bbvfx_d658610f-6e84-446d-9d81-e4e4198a6102/extract-content/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.754561 4925 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mldwz_c49d0579-4622-43ef-a28d-7cbf66ce5998/registry-server/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.761543 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mldwz_c49d0579-4622-43ef-a28d-7cbf66ce5998/extract-utilities/0.log" Jan 21 11:40:46 crc kubenswrapper[4925]: I0121 11:40:46.769045 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mldwz_c49d0579-4622-43ef-a28d-7cbf66ce5998/extract-content/0.log" Jan 21 11:40:51 crc kubenswrapper[4925]: I0121 11:40:51.960825 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-x48ml_5331ad9e-1914-414a-a7b2-b52eb191ba2f/prometheus-operator/0.log" Jan 21 11:40:51 crc kubenswrapper[4925]: I0121 11:40:51.983468 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_28aa0136-6b61-4a88-907d-265c48e36f08/prometheus-operator-admission-webhook/0.log" Jan 21 11:40:51 crc kubenswrapper[4925]: I0121 11:40:51.998985 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_ea8b2f0b-f77a-4737-be37-3268437871d9/prometheus-operator-admission-webhook/0.log" Jan 21 11:40:52 crc kubenswrapper[4925]: I0121 11:40:52.037868 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-jvt9p_e052dc8b-2520-4757-bb0a-d1350ad44b08/operator/0.log" Jan 21 11:40:52 crc kubenswrapper[4925]: I0121 11:40:52.045870 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-mmj2s_a5dc6045-7192-42dc-b653-a71b80a9f119/observability-ui-dashboards/0.log" Jan 21 11:40:52 crc kubenswrapper[4925]: I0121 11:40:52.060072 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-655nk_0d031a33-73a8-45d7-9979-e1266d9e7be7/perses-operator/0.log" Jan 21 11:40:59 crc kubenswrapper[4925]: I0121 11:40:59.507270 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:40:59 crc kubenswrapper[4925]: E0121 11:40:59.507995 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:41:12 crc kubenswrapper[4925]: I0121 11:41:12.502617 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:41:12 crc kubenswrapper[4925]: E0121 11:41:12.503328 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:41:26 crc 
kubenswrapper[4925]: I0121 11:41:26.501578 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:41:27 crc kubenswrapper[4925]: I0121 11:41:27.361596 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"2529be439eb059316b87a73fdab3300524b65045be77bd9c5795f114c7d0f947"} Jan 21 11:41:27 crc kubenswrapper[4925]: I0121 11:41:27.392926 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-tws8s/must-gather-q7jxq" podStartSLOduration=148.267031857 podStartE2EDuration="2m40.392871183s" podCreationTimestamp="2026-01-21 11:38:47 +0000 UTC" firstStartedPulling="2026-01-21 11:38:48.734057447 +0000 UTC m=+2620.337949381" lastFinishedPulling="2026-01-21 11:39:00.859896773 +0000 UTC m=+2632.463788707" observedRunningTime="2026-01-21 11:39:01.872695095 +0000 UTC m=+2633.476587029" watchObservedRunningTime="2026-01-21 11:41:27.392871183 +0000 UTC m=+2778.996763107" Jan 21 11:41:32 crc kubenswrapper[4925]: I0121 11:41:32.250324 4925 scope.go:117] "RemoveContainer" containerID="6a4cddc21beee0bd825ac08e15631e6c37747e29e1310fc29b36730d1fdb747a" Jan 21 11:41:32 crc kubenswrapper[4925]: I0121 11:41:32.274752 4925 scope.go:117] "RemoveContainer" containerID="589148156dc33205bcc63afccc1aa1e5f9cb136d8f6ceea1e1669cc3fdf8c5e8" Jan 21 11:41:32 crc kubenswrapper[4925]: I0121 11:41:32.330361 4925 scope.go:117] "RemoveContainer" containerID="6953899fcdf4ed8d6235cd8bfc0b3d0780f42647d8602f55be72fca6ef74ca54" Jan 21 11:41:32 crc kubenswrapper[4925]: I0121 11:41:32.422992 4925 scope.go:117] "RemoveContainer" containerID="44a01aeb3a8e4af907e3abea96d300314731378a6e35f4267b229d1af5abb1bc" Jan 21 11:41:32 crc kubenswrapper[4925]: I0121 11:41:32.477623 4925 scope.go:117] "RemoveContainer" containerID="675d805878270f86732a88bd5a2d37d546821329662ef0efb209fdf85bcdb226" Jan 21 11:41:32 crc kubenswrapper[4925]: I0121 11:41:32.518302 4925 scope.go:117] "RemoveContainer" containerID="f58fe1de40b4418913300b75185750283ca91971005665c64754e92b4197c09b" Jan 21 11:41:32 crc kubenswrapper[4925]: I0121 11:41:32.557658 4925 scope.go:117] "RemoveContainer" containerID="cbe7355def38373967dececdef68f3770f3edddb81a7a49faa58ae53512747ff" Jan 21 11:41:32 crc kubenswrapper[4925]: I0121 11:41:32.593548 4925 scope.go:117] "RemoveContainer" containerID="fd6e8e573247080f6fe03b76cf18c5be91ba1a5482d626abb6aeaca163ba27f5" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.418297 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-x48ml_5331ad9e-1914-414a-a7b2-b52eb191ba2f/prometheus-operator/0.log" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.429595 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58676c4f98-fgbpf_28aa0136-6b61-4a88-907d-265c48e36f08/prometheus-operator-admission-webhook/0.log" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.453940 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58676c4f98-l96bw_ea8b2f0b-f77a-4737-be37-3268437871d9/prometheus-operator-admission-webhook/0.log" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.493381 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-jvt9p_e052dc8b-2520-4757-bb0a-d1350ad44b08/operator/0.log" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.501853 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-mmj2s_a5dc6045-7192-42dc-b653-a71b80a9f119/observability-ui-dashboards/0.log" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.522807 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-655nk_0d031a33-73a8-45d7-9979-e1266d9e7be7/perses-operator/0.log" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.764783 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-946sh_d5dc9762-e122-475f-a1a2-2d9711313716/cert-manager-controller/0.log" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.782719 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-gl7pg_1ae434a0-174f-4c93-bf2c-ab2091c54e6c/cert-manager-cainjector/0.log" Jan 21 11:41:56 crc kubenswrapper[4925]: I0121 11:41:56.792779 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-9vbqj_d2372e44-af17-4c55-9a11-67fb28adcc08/cert-manager-webhook/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.151553 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/extract/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.167291 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/util/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.175632 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/pull/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.192187 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/extract/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.204488 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/util/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.230735 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/pull/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.249758 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-r4klh_d8031329-a6ad-49da-881e-94db9f545ab7/manager/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.306762 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-vmg65_cf77bf31-5d25-4015-b274-05dbedbedf5a/manager/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.320375 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mjg4d_50c322c0-a941-48fa-bf86-c2daa64a9aa8/manager/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.327251 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q24bt_66dc8772-25c5-4ad1-b0fa-6981e3158ad5/controller/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.336669 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q24bt_66dc8772-25c5-4ad1-b0fa-6981e3158ad5/kube-rbac-proxy/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.339820 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-ggpw9_e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23/manager/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.357169 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-h9szq_9b9f5cfa-93e1-4940-b7f0-066c6bc4f194/manager/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.375004 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/controller/0.log" Jan 21 11:41:58 crc kubenswrapper[4925]: I0121 11:41:58.385041 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-gcxp4_c4be49a0-e872-456f-a102-928f5210524f/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.055624 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-dqjpf_dbe9a043-a969-429b-b7b1-33d12296c52c/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.073456 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-wdwvl_2d8c2e69-7444-465a-a418-59d9c5b20074/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.257091 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-2znsh_2c47ce4c-9012-4798-9bf8-127a96ad285e/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.271779 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-x7474_fc8ec38e-f941-4ba0-863e-933e10bf2043/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.326293 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-b4cd2_6d27cfd1-683a-4e92-bcaf-40f1f370cd1b/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.337952 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-t9fng_398ea514-c4f3-40db-8421-ebf007fda30d/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.353237 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-nqldj_a7dd34dc-8a69-4c91-88ec-d1d7beffb15d/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.373013 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-44xwf_cc5d8922-f54d-42a1-b23a-622329e3f644/manager/0.log" Jan 21 11:41:59 crc kubenswrapper[4925]: I0121 11:41:59.399281 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9_05db7c08-87f6-4518-8d61-c87cbf0b1735/manager/0.log" Jan 21 11:42:00 crc kubenswrapper[4925]: I0121 11:42:00.455849 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-87d6d564b-dgm28_be80c7ef-4f5f-4660-9954-5ab5b34655cf/manager/0.log" Jan 21 11:42:00 crc kubenswrapper[4925]: I0121 11:42:00.468807 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6jmrz_25cc6d46-b21a-463f-a13a-9874780c87f3/registry-server/0.log" Jan 21 11:42:00 crc kubenswrapper[4925]: I0121 11:42:00.493308 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-l9f98_a032309d-2543-4e6b-8207-d8097dffcaf5/manager/0.log" Jan 21 11:42:00 crc kubenswrapper[4925]: I0121 11:42:00.527501 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-84spn_0fb89ff9-2ba9-4a38-b739-43fa22a5b209/manager/0.log" Jan 21 11:42:00 crc kubenswrapper[4925]: I0121 11:42:00.549421 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-k7r2f_182d9a34-f024-4a86-8851-9e20d654f4ac/operator/0.log" Jan 21 11:42:00 crc kubenswrapper[4925]: I0121 11:42:00.563553 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-cq4k9_a9c52af6-912a-4e93-bbcd-42e961453471/manager/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.072129 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/frr/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.094590 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-gcwbr_d7429a44-6eeb-419b-8193-29275baf4ad9/manager/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.096564 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/reloader/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.130529 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/frr-metrics/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.153582 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-hvtnz_fd15c43d-a647-467e-a4f1-eb0ca81a123f/manager/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.153701 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/kube-rbac-proxy/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.163270 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/kube-rbac-proxy-frr/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.178703 4925 log.go:25] "Finished 
parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-frr-files/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.186343 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-reloader/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.195197 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7tz4m_9449246f-d4a0-407f-8e9f-cb7271c90d72/cp-metrics/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.216068 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-9d94c_0b7695ad-2b58-4be9-911d-bc83bece0db7/frr-k8s-webhook-server/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.248038 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-57547767ff-zrxjk_1f8eea58-9366-4bb1-a9d2-dc8842674dc2/manager/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.265800 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5b9dd8b59d-59895_955477b1-b9f0-41a2-aa5b-2e2f47495422/webhook-server/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.771107 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zxq6z_0ac5019d-ffb4-4cb6-9042-1b983b15841a/speaker/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.849196 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zxq6z_0ac5019d-ffb4-4cb6-9042-1b983b15841a/kube-rbac-proxy/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.849453 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-696df99475-8gncw_d8433174-98d9-44ec-924f-6fe639538b64/manager/0.log" Jan 21 11:42:01 crc kubenswrapper[4925]: I0121 11:42:01.860044 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-index-lvk6z_2ac527d2-36c7-40bc-ae76-8d007f3dadb3/registry-server/0.log" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.157966 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wr797"] Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.160778 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.171029 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wr797"] Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.329194 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlxb4\" (UniqueName: \"kubernetes.io/projected/5e941599-4ca1-4049-8f8a-dab0f762fcf9-kube-api-access-zlxb4\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.329288 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-utilities\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.329345 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-catalog-content\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.430577 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlxb4\" (UniqueName: \"kubernetes.io/projected/5e941599-4ca1-4049-8f8a-dab0f762fcf9-kube-api-access-zlxb4\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.430676 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-utilities\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.430734 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-catalog-content\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.432207 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-catalog-content\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.432572 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-utilities\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.467532 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-zlxb4\" (UniqueName: \"kubernetes.io/projected/5e941599-4ca1-4049-8f8a-dab0f762fcf9-kube-api-access-zlxb4\") pod \"redhat-operators-wr797\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.492230 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:02 crc kubenswrapper[4925]: I0121 11:42:02.961793 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wr797"] Jan 21 11:42:03 crc kubenswrapper[4925]: I0121 11:42:03.050036 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr797" event={"ID":"5e941599-4ca1-4049-8f8a-dab0f762fcf9","Type":"ContainerStarted","Data":"7058ef8086ef025c6e94455480088d01ea728a8f9c0fdcba51442cb82f3d592d"} Jan 21 11:42:03 crc kubenswrapper[4925]: I0121 11:42:03.350290 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-946sh_d5dc9762-e122-475f-a1a2-2d9711313716/cert-manager-controller/0.log" Jan 21 11:42:03 crc kubenswrapper[4925]: I0121 11:42:03.470673 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-gl7pg_1ae434a0-174f-4c93-bf2c-ab2091c54e6c/cert-manager-cainjector/0.log" Jan 21 11:42:03 crc kubenswrapper[4925]: I0121 11:42:03.486322 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-9vbqj_d2372e44-af17-4c55-9a11-67fb28adcc08/cert-manager-webhook/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.060257 4925 generic.go:334] "Generic (PLEG): container finished" podID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerID="5e28979350df4bdbb8a946976d88cf10c4eabfb301a4830649a380909a5dd8e0" exitCode=0 Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.060313 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr797" event={"ID":"5e941599-4ca1-4049-8f8a-dab0f762fcf9","Type":"ContainerDied","Data":"5e28979350df4bdbb8a946976d88cf10c4eabfb301a4830649a380909a5dd8e0"} Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.263987 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-58tbd_eac14392-e11f-4bf9-b1db-d6200c0d0821/nmstate-console-plugin/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.303776 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-j7spk_a3532c91-ec04-4a0c-99d3-bf6bf96a8887/nmstate-handler/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.316643 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-c5llf_e125e9a3-31e1-47ce-ab99-c65ace2a60ec/nmstate-metrics/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.326928 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-c5llf_e125e9a3-31e1-47ce-ab99-c65ace2a60ec/kube-rbac-proxy/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.346010 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-8kzgs_88c3cb65-2aff-44db-85fb-8c365c93439f/nmstate-operator/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.357613 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-87q5g_3c739657-6960-46fe-be71-6d965b98e714/nmstate-webhook/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.827724 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-2fd99_5c2fa6a9-ee76-4308-a8f1-095d9720c688/control-plane-machine-set-operator/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.859591 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bmpxp_5a264bb6-3e63-4411-b0a4-95be21527653/kube-rbac-proxy/0.log" Jan 21 11:42:04 crc kubenswrapper[4925]: I0121 11:42:04.872314 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bmpxp_5a264bb6-3e63-4411-b0a4-95be21527653/machine-api-operator/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.078667 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr797" event={"ID":"5e941599-4ca1-4049-8f8a-dab0f762fcf9","Type":"ContainerStarted","Data":"35483944120af9b475ba14f36f745cce57a817e3ddd744a7f8f5c3dee1d13369"} Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.682156 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/extract/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.691128 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/util/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.698037 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_144d34eb9a01f64553f418551f09aae5c8acbbe5dabd7b229fc5e5452fms2cx_9c5f0822-d0f9-4273-8e7f-e2f91d277a01/pull/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.709147 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/extract/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.716599 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/util/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.728378 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_23550c8618544ac9ea89afd4ce99cda9256ff69faea7c95bed8068d414hqt75_06c4add2-f00d-4aea-8168-f165cdf2b7cf/pull/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.741681 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-r4klh_d8031329-a6ad-49da-881e-94db9f545ab7/manager/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.787075 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-vmg65_cf77bf31-5d25-4015-b274-05dbedbedf5a/manager/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.800155 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mjg4d_50c322c0-a941-48fa-bf86-c2daa64a9aa8/manager/0.log" 
Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.810075 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-ggpw9_e1fe1f25-b3e4-4ed9-a44b-e9bdf6eedd23/manager/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.822619 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-h9szq_9b9f5cfa-93e1-4940-b7f0-066c6bc4f194/manager/0.log" Jan 21 11:42:06 crc kubenswrapper[4925]: I0121 11:42:06.832750 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-gcxp4_c4be49a0-e872-456f-a102-928f5210524f/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.089387 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr797" event={"ID":"5e941599-4ca1-4049-8f8a-dab0f762fcf9","Type":"ContainerDied","Data":"35483944120af9b475ba14f36f745cce57a817e3ddd744a7f8f5c3dee1d13369"} Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.089216 4925 generic.go:334] "Generic (PLEG): container finished" podID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerID="35483944120af9b475ba14f36f745cce57a817e3ddd744a7f8f5c3dee1d13369" exitCode=0 Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.092079 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-dqjpf_dbe9a043-a969-429b-b7b1-33d12296c52c/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.113379 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-wdwvl_2d8c2e69-7444-465a-a418-59d9c5b20074/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.261672 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-2znsh_2c47ce4c-9012-4798-9bf8-127a96ad285e/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.272595 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-x7474_fc8ec38e-f941-4ba0-863e-933e10bf2043/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.316341 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-b4cd2_6d27cfd1-683a-4e92-bcaf-40f1f370cd1b/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.327370 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-t9fng_398ea514-c4f3-40db-8421-ebf007fda30d/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.341698 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-nqldj_a7dd34dc-8a69-4c91-88ec-d1d7beffb15d/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.352013 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-44xwf_cc5d8922-f54d-42a1-b23a-622329e3f644/manager/0.log" Jan 21 11:42:07 crc kubenswrapper[4925]: I0121 11:42:07.366876 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854hdtp9_05db7c08-87f6-4518-8d61-c87cbf0b1735/manager/0.log" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.105976 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr797" event={"ID":"5e941599-4ca1-4049-8f8a-dab0f762fcf9","Type":"ContainerStarted","Data":"4e7b77ff52d75a984c992a5db79289b3c148b70ea5b268367e173697486cca56"} Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.136408 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wr797" podStartSLOduration=2.689737686 podStartE2EDuration="6.136364369s" podCreationTimestamp="2026-01-21 11:42:02 +0000 UTC" firstStartedPulling="2026-01-21 11:42:04.062382488 +0000 UTC m=+2815.666274422" lastFinishedPulling="2026-01-21 11:42:07.509009171 +0000 UTC m=+2819.112901105" observedRunningTime="2026-01-21 11:42:08.129789275 +0000 UTC m=+2819.733681209" watchObservedRunningTime="2026-01-21 11:42:08.136364369 +0000 UTC m=+2819.740256303" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.151277 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-87d6d564b-dgm28_be80c7ef-4f5f-4660-9954-5ab5b34655cf/manager/0.log" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.162471 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6jmrz_25cc6d46-b21a-463f-a13a-9874780c87f3/registry-server/0.log" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.191682 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-l9f98_a032309d-2543-4e6b-8207-d8097dffcaf5/manager/0.log" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.202288 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-84spn_0fb89ff9-2ba9-4a38-b739-43fa22a5b209/manager/0.log" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.223878 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-k7r2f_182d9a34-f024-4a86-8851-9e20d654f4ac/operator/0.log" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.237549 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-cq4k9_a9c52af6-912a-4e93-bbcd-42e961453471/manager/0.log" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.649911 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-gcwbr_d7429a44-6eeb-419b-8193-29275baf4ad9/manager/0.log" Jan 21 11:42:08 crc kubenswrapper[4925]: I0121 11:42:08.666693 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-hvtnz_fd15c43d-a647-467e-a4f1-eb0ca81a123f/manager/0.log" Jan 21 11:42:09 crc kubenswrapper[4925]: I0121 11:42:09.216075 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-696df99475-8gncw_d8433174-98d9-44ec-924f-6fe639538b64/manager/0.log" Jan 21 11:42:09 crc kubenswrapper[4925]: I0121 11:42:09.240376 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_watcher-operator-index-lvk6z_2ac527d2-36c7-40bc-ae76-8d007f3dadb3/registry-server/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.492865 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.493212 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.501095 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-pbw2x_2b0b25f1-8430-459d-9805-e667615dc073/kube-multus-additional-cni-plugins/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.511779 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-pbw2x_2b0b25f1-8430-459d-9805-e667615dc073/egress-router-binary-copy/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.522560 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-pbw2x_2b0b25f1-8430-459d-9805-e667615dc073/cni-plugins/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.533489 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-pbw2x_2b0b25f1-8430-459d-9805-e667615dc073/bond-cni-plugin/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.541317 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-pbw2x_2b0b25f1-8430-459d-9805-e667615dc073/routeoverride-cni/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.551596 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-pbw2x_2b0b25f1-8430-459d-9805-e667615dc073/whereabouts-cni-bincopy/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.561230 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-pbw2x_2b0b25f1-8430-459d-9805-e667615dc073/whereabouts-cni/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.578900 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-k9srb_bebe6bc4-7b86-4688-ab28-408d5fc1ed7e/multus-admission-controller/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.585063 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-k9srb_bebe6bc4-7b86-4688-ab28-408d5fc1ed7e/kube-rbac-proxy/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.637822 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/2.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.667756 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hwzqb_82b678c3-b1e1-4294-9f9f-02103a6823cc/kube-multus/3.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.693090 4925 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-2txwq_5c3596d1-1f08-4703-ab63-c29358aac0d9/network-metrics-daemon/0.log" Jan 21 11:42:12 crc kubenswrapper[4925]: I0121 11:42:12.701132 4925 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_network-metrics-daemon-2txwq_5c3596d1-1f08-4703-ab63-c29358aac0d9/kube-rbac-proxy/0.log" Jan 21 11:42:13 crc kubenswrapper[4925]: I0121 11:42:13.535864 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wr797" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="registry-server" probeResult="failure" output=< Jan 21 11:42:13 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:42:13 crc kubenswrapper[4925]: > Jan 21 11:42:22 crc kubenswrapper[4925]: I0121 11:42:22.548135 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:22 crc kubenswrapper[4925]: I0121 11:42:22.716467 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.090089 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wr797"] Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.090721 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wr797" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="registry-server" containerID="cri-o://4e7b77ff52d75a984c992a5db79289b3c148b70ea5b268367e173697486cca56" gracePeriod=2 Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.599790 4925 generic.go:334] "Generic (PLEG): container finished" podID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerID="4e7b77ff52d75a984c992a5db79289b3c148b70ea5b268367e173697486cca56" exitCode=0 Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.599853 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr797" event={"ID":"5e941599-4ca1-4049-8f8a-dab0f762fcf9","Type":"ContainerDied","Data":"4e7b77ff52d75a984c992a5db79289b3c148b70ea5b268367e173697486cca56"} Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.727084 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.889805 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-catalog-content\") pod \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.890014 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlxb4\" (UniqueName: \"kubernetes.io/projected/5e941599-4ca1-4049-8f8a-dab0f762fcf9-kube-api-access-zlxb4\") pod \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.890073 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-utilities\") pod \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\" (UID: \"5e941599-4ca1-4049-8f8a-dab0f762fcf9\") " Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.890995 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-utilities" (OuterVolumeSpecName: "utilities") pod "5e941599-4ca1-4049-8f8a-dab0f762fcf9" (UID: "5e941599-4ca1-4049-8f8a-dab0f762fcf9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.895488 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e941599-4ca1-4049-8f8a-dab0f762fcf9-kube-api-access-zlxb4" (OuterVolumeSpecName: "kube-api-access-zlxb4") pod "5e941599-4ca1-4049-8f8a-dab0f762fcf9" (UID: "5e941599-4ca1-4049-8f8a-dab0f762fcf9"). InnerVolumeSpecName "kube-api-access-zlxb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.992016 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlxb4\" (UniqueName: \"kubernetes.io/projected/5e941599-4ca1-4049-8f8a-dab0f762fcf9-kube-api-access-zlxb4\") on node \"crc\" DevicePath \"\"" Jan 21 11:42:26 crc kubenswrapper[4925]: I0121 11:42:26.992283 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.016439 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5e941599-4ca1-4049-8f8a-dab0f762fcf9" (UID: "5e941599-4ca1-4049-8f8a-dab0f762fcf9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.093965 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e941599-4ca1-4049-8f8a-dab0f762fcf9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.615859 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr797" event={"ID":"5e941599-4ca1-4049-8f8a-dab0f762fcf9","Type":"ContainerDied","Data":"7058ef8086ef025c6e94455480088d01ea728a8f9c0fdcba51442cb82f3d592d"} Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.616360 4925 scope.go:117] "RemoveContainer" containerID="4e7b77ff52d75a984c992a5db79289b3c148b70ea5b268367e173697486cca56" Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.616107 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wr797" Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.647080 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wr797"] Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.660918 4925 scope.go:117] "RemoveContainer" containerID="35483944120af9b475ba14f36f745cce57a817e3ddd744a7f8f5c3dee1d13369" Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.664044 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wr797"] Jan 21 11:42:27 crc kubenswrapper[4925]: I0121 11:42:27.700895 4925 scope.go:117] "RemoveContainer" containerID="5e28979350df4bdbb8a946976d88cf10c4eabfb301a4830649a380909a5dd8e0" Jan 21 11:42:29 crc kubenswrapper[4925]: I0121 11:42:29.580781 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" path="/var/lib/kubelet/pods/5e941599-4ca1-4049-8f8a-dab0f762fcf9/volumes" Jan 21 11:42:32 crc kubenswrapper[4925]: I0121 11:42:32.819001 4925 scope.go:117] "RemoveContainer" containerID="426fed9c2aea6aad00a0d70d99694643eb8d7aa0b1b803485ef89e80691ae95e" Jan 21 11:42:32 crc kubenswrapper[4925]: I0121 11:42:32.852743 4925 scope.go:117] "RemoveContainer" containerID="55cd76f2a9fa1e5b5b216e1542a7350fc67a4c8c120528918d53361e1d5fb46e" Jan 21 11:42:32 crc kubenswrapper[4925]: I0121 11:42:32.887952 4925 scope.go:117] "RemoveContainer" containerID="c0c0e93fe76e9cbd3775b519bb8b929708723b807151a63898ee378d2a72b673" Jan 21 11:42:32 crc kubenswrapper[4925]: I0121 11:42:32.912364 4925 scope.go:117] "RemoveContainer" containerID="a4873bd8419f25628a4d06624c9c7af93a0d117dd6763d940ffccbdb07370bea" Jan 21 11:42:32 crc kubenswrapper[4925]: I0121 11:42:32.944012 4925 scope.go:117] "RemoveContainer" containerID="2d41dd849a503bf131acf8dc139591987bac9a8034f9c4781b570f088537291c" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.148689 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-94z4h"] Jan 21 11:42:45 crc kubenswrapper[4925]: E0121 11:42:45.149623 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="extract-utilities" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.149638 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="extract-utilities" Jan 21 11:42:45 crc kubenswrapper[4925]: E0121 11:42:45.149660 4925 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="extract-content" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.149667 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="extract-content" Jan 21 11:42:45 crc kubenswrapper[4925]: E0121 11:42:45.149686 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="registry-server" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.149693 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="registry-server" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.149877 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e941599-4ca1-4049-8f8a-dab0f762fcf9" containerName="registry-server" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.186771 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.205116 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-94z4h"] Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.301893 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-catalog-content\") pod \"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.302059 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2g4zj\" (UniqueName: \"kubernetes.io/projected/bb5af003-b785-4f73-9913-f3f7a5f26fae-kube-api-access-2g4zj\") pod \"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.302092 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-utilities\") pod \"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.403410 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2g4zj\" (UniqueName: \"kubernetes.io/projected/bb5af003-b785-4f73-9913-f3f7a5f26fae-kube-api-access-2g4zj\") pod \"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.403811 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-utilities\") pod \"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.403961 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-catalog-content\") pod 
\"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.404656 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-utilities\") pod \"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.404880 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-catalog-content\") pod \"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.424323 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2g4zj\" (UniqueName: \"kubernetes.io/projected/bb5af003-b785-4f73-9913-f3f7a5f26fae-kube-api-access-2g4zj\") pod \"certified-operators-94z4h\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:45 crc kubenswrapper[4925]: I0121 11:42:45.530941 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:46 crc kubenswrapper[4925]: I0121 11:42:46.432164 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-94z4h"] Jan 21 11:42:46 crc kubenswrapper[4925]: W0121 11:42:46.451546 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb5af003_b785_4f73_9913_f3f7a5f26fae.slice/crio-c5acac9baca83383aa6bf588658cb56531f90724d106ab48a0e29fd515e031d9 WatchSource:0}: Error finding container c5acac9baca83383aa6bf588658cb56531f90724d106ab48a0e29fd515e031d9: Status 404 returned error can't find the container with id c5acac9baca83383aa6bf588658cb56531f90724d106ab48a0e29fd515e031d9 Jan 21 11:42:46 crc kubenswrapper[4925]: E0121 11:42:46.906899 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb5af003_b785_4f73_9913_f3f7a5f26fae.slice/crio-conmon-e5830af68dd0951c1a6db3b75661fdf95650d7456a3614544749a66b8e998abf.scope\": RecentStats: unable to find data in memory cache]" Jan 21 11:42:47 crc kubenswrapper[4925]: I0121 11:42:47.158166 4925 generic.go:334] "Generic (PLEG): container finished" podID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerID="e5830af68dd0951c1a6db3b75661fdf95650d7456a3614544749a66b8e998abf" exitCode=0 Jan 21 11:42:47 crc kubenswrapper[4925]: I0121 11:42:47.158221 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94z4h" event={"ID":"bb5af003-b785-4f73-9913-f3f7a5f26fae","Type":"ContainerDied","Data":"e5830af68dd0951c1a6db3b75661fdf95650d7456a3614544749a66b8e998abf"} Jan 21 11:42:47 crc kubenswrapper[4925]: I0121 11:42:47.158253 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94z4h" event={"ID":"bb5af003-b785-4f73-9913-f3f7a5f26fae","Type":"ContainerStarted","Data":"c5acac9baca83383aa6bf588658cb56531f90724d106ab48a0e29fd515e031d9"} Jan 21 11:42:47 crc 
kubenswrapper[4925]: I0121 11:42:47.160202 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 11:42:48 crc kubenswrapper[4925]: I0121 11:42:48.169579 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94z4h" event={"ID":"bb5af003-b785-4f73-9913-f3f7a5f26fae","Type":"ContainerStarted","Data":"f44e6d34909095fb5906d72bd4ca9b89a7fb352ad7395c5d089702ba84df1366"} Jan 21 11:42:49 crc kubenswrapper[4925]: I0121 11:42:49.182045 4925 generic.go:334] "Generic (PLEG): container finished" podID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerID="f44e6d34909095fb5906d72bd4ca9b89a7fb352ad7395c5d089702ba84df1366" exitCode=0 Jan 21 11:42:49 crc kubenswrapper[4925]: I0121 11:42:49.182174 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94z4h" event={"ID":"bb5af003-b785-4f73-9913-f3f7a5f26fae","Type":"ContainerDied","Data":"f44e6d34909095fb5906d72bd4ca9b89a7fb352ad7395c5d089702ba84df1366"} Jan 21 11:42:50 crc kubenswrapper[4925]: I0121 11:42:50.192751 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94z4h" event={"ID":"bb5af003-b785-4f73-9913-f3f7a5f26fae","Type":"ContainerStarted","Data":"242577d8a8012c6364999fbacccaf0b47db9487957cd3df1e25f169183aa07de"} Jan 21 11:42:50 crc kubenswrapper[4925]: I0121 11:42:50.220463 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-94z4h" podStartSLOduration=2.775889823 podStartE2EDuration="5.220437797s" podCreationTimestamp="2026-01-21 11:42:45 +0000 UTC" firstStartedPulling="2026-01-21 11:42:47.159920665 +0000 UTC m=+2858.763812599" lastFinishedPulling="2026-01-21 11:42:49.604468619 +0000 UTC m=+2861.208360573" observedRunningTime="2026-01-21 11:42:50.214286897 +0000 UTC m=+2861.818178831" watchObservedRunningTime="2026-01-21 11:42:50.220437797 +0000 UTC m=+2861.824329731" Jan 21 11:42:55 crc kubenswrapper[4925]: I0121 11:42:55.532235 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:55 crc kubenswrapper[4925]: I0121 11:42:55.532779 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:55 crc kubenswrapper[4925]: I0121 11:42:55.581555 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:56 crc kubenswrapper[4925]: I0121 11:42:56.292039 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:42:59 crc kubenswrapper[4925]: I0121 11:42:59.089878 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-94z4h"] Jan 21 11:42:59 crc kubenswrapper[4925]: I0121 11:42:59.090665 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-94z4h" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerName="registry-server" containerID="cri-o://242577d8a8012c6364999fbacccaf0b47db9487957cd3df1e25f169183aa07de" gracePeriod=2 Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.285214 4925 generic.go:334] "Generic (PLEG): container finished" podID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerID="242577d8a8012c6364999fbacccaf0b47db9487957cd3df1e25f169183aa07de" 
exitCode=0 Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.285291 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94z4h" event={"ID":"bb5af003-b785-4f73-9913-f3f7a5f26fae","Type":"ContainerDied","Data":"242577d8a8012c6364999fbacccaf0b47db9487957cd3df1e25f169183aa07de"} Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.727239 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.747468 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-utilities\") pod \"bb5af003-b785-4f73-9913-f3f7a5f26fae\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.747575 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-catalog-content\") pod \"bb5af003-b785-4f73-9913-f3f7a5f26fae\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.747655 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2g4zj\" (UniqueName: \"kubernetes.io/projected/bb5af003-b785-4f73-9913-f3f7a5f26fae-kube-api-access-2g4zj\") pod \"bb5af003-b785-4f73-9913-f3f7a5f26fae\" (UID: \"bb5af003-b785-4f73-9913-f3f7a5f26fae\") " Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.748834 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-utilities" (OuterVolumeSpecName: "utilities") pod "bb5af003-b785-4f73-9913-f3f7a5f26fae" (UID: "bb5af003-b785-4f73-9913-f3f7a5f26fae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.759491 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb5af003-b785-4f73-9913-f3f7a5f26fae-kube-api-access-2g4zj" (OuterVolumeSpecName: "kube-api-access-2g4zj") pod "bb5af003-b785-4f73-9913-f3f7a5f26fae" (UID: "bb5af003-b785-4f73-9913-f3f7a5f26fae"). InnerVolumeSpecName "kube-api-access-2g4zj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.816375 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb5af003-b785-4f73-9913-f3f7a5f26fae" (UID: "bb5af003-b785-4f73-9913-f3f7a5f26fae"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.850860 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2g4zj\" (UniqueName: \"kubernetes.io/projected/bb5af003-b785-4f73-9913-f3f7a5f26fae-kube-api-access-2g4zj\") on node \"crc\" DevicePath \"\"" Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.850914 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:43:00 crc kubenswrapper[4925]: I0121 11:43:00.850936 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5af003-b785-4f73-9913-f3f7a5f26fae-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:43:01 crc kubenswrapper[4925]: I0121 11:43:01.298241 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94z4h" event={"ID":"bb5af003-b785-4f73-9913-f3f7a5f26fae","Type":"ContainerDied","Data":"c5acac9baca83383aa6bf588658cb56531f90724d106ab48a0e29fd515e031d9"} Jan 21 11:43:01 crc kubenswrapper[4925]: I0121 11:43:01.298329 4925 scope.go:117] "RemoveContainer" containerID="242577d8a8012c6364999fbacccaf0b47db9487957cd3df1e25f169183aa07de" Jan 21 11:43:01 crc kubenswrapper[4925]: I0121 11:43:01.298334 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94z4h" Jan 21 11:43:01 crc kubenswrapper[4925]: I0121 11:43:01.326529 4925 scope.go:117] "RemoveContainer" containerID="f44e6d34909095fb5906d72bd4ca9b89a7fb352ad7395c5d089702ba84df1366" Jan 21 11:43:01 crc kubenswrapper[4925]: I0121 11:43:01.356803 4925 scope.go:117] "RemoveContainer" containerID="e5830af68dd0951c1a6db3b75661fdf95650d7456a3614544749a66b8e998abf" Jan 21 11:43:01 crc kubenswrapper[4925]: I0121 11:43:01.359365 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-94z4h"] Jan 21 11:43:01 crc kubenswrapper[4925]: I0121 11:43:01.365655 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-94z4h"] Jan 21 11:43:01 crc kubenswrapper[4925]: I0121 11:43:01.518079 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" path="/var/lib/kubelet/pods/bb5af003-b785-4f73-9913-f3f7a5f26fae/volumes" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.016476 4925 scope.go:117] "RemoveContainer" containerID="7359f9cad1c1cf7ec0b32e455553d9d3b3ad9148115ee60a183c032da0d9f0b9" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.051997 4925 scope.go:117] "RemoveContainer" containerID="e8222fe2a62342451f65ccc371dbb52a1ddca925baa56e542a3acc9168591a3e" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.088684 4925 scope.go:117] "RemoveContainer" containerID="e2fd5052c5c331d244523f86308f7ca15f740afe82872aeb5164536ccaa22994" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.140475 4925 scope.go:117] "RemoveContainer" containerID="d7edb614a7ba71de83ba046014f935743a3428d27eb104ead7a521533718e873" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.163328 4925 scope.go:117] "RemoveContainer" containerID="89e6319de72d495e73439f260b648b87927152fb793b072e3744077e4be2fb2d" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.185299 4925 scope.go:117] "RemoveContainer" 
containerID="d99c3d55a9cfbb6dbbcc9813de8123a4bf40b2d6e46f6b7f1701cbb263ec9ffd" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.216202 4925 scope.go:117] "RemoveContainer" containerID="dab276c7373336a43c392d22aaf42a68b8b7f9e4b4fa6accad91ced7556aa2e3" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.253502 4925 scope.go:117] "RemoveContainer" containerID="86260796942f1ba88f9c7d449270684752bfd79685c5d81f380dd2acb1eb3dd6" Jan 21 11:43:33 crc kubenswrapper[4925]: I0121 11:43:33.335326 4925 scope.go:117] "RemoveContainer" containerID="e0d0d2f5376d86f549374a5afd726fd09d3be22cd1c1dd2df5b90480e175fd56" Jan 21 11:43:49 crc kubenswrapper[4925]: I0121 11:43:49.941151 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:43:49 crc kubenswrapper[4925]: I0121 11:43:49.942006 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:44:15 crc kubenswrapper[4925]: I0121 11:44:15.922340 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jwkpm"] Jan 21 11:44:15 crc kubenswrapper[4925]: E0121 11:44:15.923991 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerName="extract-content" Jan 21 11:44:15 crc kubenswrapper[4925]: I0121 11:44:15.924015 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerName="extract-content" Jan 21 11:44:15 crc kubenswrapper[4925]: E0121 11:44:15.924065 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerName="extract-utilities" Jan 21 11:44:15 crc kubenswrapper[4925]: I0121 11:44:15.924076 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerName="extract-utilities" Jan 21 11:44:15 crc kubenswrapper[4925]: E0121 11:44:15.924247 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerName="registry-server" Jan 21 11:44:15 crc kubenswrapper[4925]: I0121 11:44:15.924260 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerName="registry-server" Jan 21 11:44:15 crc kubenswrapper[4925]: I0121 11:44:15.924786 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb5af003-b785-4f73-9913-f3f7a5f26fae" containerName="registry-server" Jan 21 11:44:15 crc kubenswrapper[4925]: I0121 11:44:15.930125 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.130813 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jwkpm"] Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.137189 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-244m9\" (UniqueName: \"kubernetes.io/projected/67b15615-10ec-4bdb-b241-c82d5cba850f-kube-api-access-244m9\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.137277 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-catalog-content\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.137318 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-utilities\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.238805 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-catalog-content\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.238878 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-utilities\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.238989 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-244m9\" (UniqueName: \"kubernetes.io/projected/67b15615-10ec-4bdb-b241-c82d5cba850f-kube-api-access-244m9\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.239553 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-catalog-content\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.239658 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-utilities\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.261228 4925 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-244m9\" (UniqueName: \"kubernetes.io/projected/67b15615-10ec-4bdb-b241-c82d5cba850f-kube-api-access-244m9\") pod \"community-operators-jwkpm\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:16 crc kubenswrapper[4925]: I0121 11:44:16.444122 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:17 crc kubenswrapper[4925]: I0121 11:44:17.080952 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jwkpm"] Jan 21 11:44:17 crc kubenswrapper[4925]: I0121 11:44:17.531286 4925 generic.go:334] "Generic (PLEG): container finished" podID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerID="c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79" exitCode=0 Jan 21 11:44:17 crc kubenswrapper[4925]: I0121 11:44:17.531355 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jwkpm" event={"ID":"67b15615-10ec-4bdb-b241-c82d5cba850f","Type":"ContainerDied","Data":"c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79"} Jan 21 11:44:17 crc kubenswrapper[4925]: I0121 11:44:17.531660 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jwkpm" event={"ID":"67b15615-10ec-4bdb-b241-c82d5cba850f","Type":"ContainerStarted","Data":"bda550b3d58c12c4fb44078b41df1cbc257480a084c27695201b94f64a280ef3"} Jan 21 11:44:18 crc kubenswrapper[4925]: I0121 11:44:18.540918 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jwkpm" event={"ID":"67b15615-10ec-4bdb-b241-c82d5cba850f","Type":"ContainerStarted","Data":"5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5"} Jan 21 11:44:18 crc kubenswrapper[4925]: E0121 11:44:18.969992 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67b15615_10ec_4bdb_b241_c82d5cba850f.slice/crio-conmon-5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5.scope\": RecentStats: unable to find data in memory cache]" Jan 21 11:44:19 crc kubenswrapper[4925]: I0121 11:44:19.550146 4925 generic.go:334] "Generic (PLEG): container finished" podID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerID="5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5" exitCode=0 Jan 21 11:44:19 crc kubenswrapper[4925]: I0121 11:44:19.550192 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jwkpm" event={"ID":"67b15615-10ec-4bdb-b241-c82d5cba850f","Type":"ContainerDied","Data":"5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5"} Jan 21 11:44:20 crc kubenswrapper[4925]: I0121 11:44:19.941383 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:44:20 crc kubenswrapper[4925]: I0121 11:44:19.941846 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:44:20 crc kubenswrapper[4925]: I0121 11:44:20.560544 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jwkpm" event={"ID":"67b15615-10ec-4bdb-b241-c82d5cba850f","Type":"ContainerStarted","Data":"01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4"} Jan 21 11:44:20 crc kubenswrapper[4925]: I0121 11:44:20.592295 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jwkpm" podStartSLOduration=3.075845441 podStartE2EDuration="5.592255508s" podCreationTimestamp="2026-01-21 11:44:15 +0000 UTC" firstStartedPulling="2026-01-21 11:44:17.533210425 +0000 UTC m=+2949.137102349" lastFinishedPulling="2026-01-21 11:44:20.049620482 +0000 UTC m=+2951.653512416" observedRunningTime="2026-01-21 11:44:20.582693448 +0000 UTC m=+2952.186585382" watchObservedRunningTime="2026-01-21 11:44:20.592255508 +0000 UTC m=+2952.196147442" Jan 21 11:44:26 crc kubenswrapper[4925]: I0121 11:44:26.444380 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:26 crc kubenswrapper[4925]: I0121 11:44:26.445822 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:26 crc kubenswrapper[4925]: I0121 11:44:26.508227 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:26 crc kubenswrapper[4925]: I0121 11:44:26.659630 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:30 crc kubenswrapper[4925]: I0121 11:44:30.108853 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jwkpm"] Jan 21 11:44:30 crc kubenswrapper[4925]: I0121 11:44:30.109615 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jwkpm" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerName="registry-server" containerID="cri-o://01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4" gracePeriod=2 Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.107427 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.207610 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-244m9\" (UniqueName: \"kubernetes.io/projected/67b15615-10ec-4bdb-b241-c82d5cba850f-kube-api-access-244m9\") pod \"67b15615-10ec-4bdb-b241-c82d5cba850f\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.207674 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-catalog-content\") pod \"67b15615-10ec-4bdb-b241-c82d5cba850f\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.207723 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-utilities\") pod \"67b15615-10ec-4bdb-b241-c82d5cba850f\" (UID: \"67b15615-10ec-4bdb-b241-c82d5cba850f\") " Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.210222 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-utilities" (OuterVolumeSpecName: "utilities") pod "67b15615-10ec-4bdb-b241-c82d5cba850f" (UID: "67b15615-10ec-4bdb-b241-c82d5cba850f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.214026 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67b15615-10ec-4bdb-b241-c82d5cba850f-kube-api-access-244m9" (OuterVolumeSpecName: "kube-api-access-244m9") pod "67b15615-10ec-4bdb-b241-c82d5cba850f" (UID: "67b15615-10ec-4bdb-b241-c82d5cba850f"). InnerVolumeSpecName "kube-api-access-244m9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.283612 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67b15615-10ec-4bdb-b241-c82d5cba850f" (UID: "67b15615-10ec-4bdb-b241-c82d5cba850f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.309223 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-244m9\" (UniqueName: \"kubernetes.io/projected/67b15615-10ec-4bdb-b241-c82d5cba850f-kube-api-access-244m9\") on node \"crc\" DevicePath \"\"" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.309265 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.309275 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b15615-10ec-4bdb-b241-c82d5cba850f-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.660332 4925 generic.go:334] "Generic (PLEG): container finished" podID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerID="01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4" exitCode=0 Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.660417 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jwkpm" event={"ID":"67b15615-10ec-4bdb-b241-c82d5cba850f","Type":"ContainerDied","Data":"01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4"} Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.660457 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jwkpm" event={"ID":"67b15615-10ec-4bdb-b241-c82d5cba850f","Type":"ContainerDied","Data":"bda550b3d58c12c4fb44078b41df1cbc257480a084c27695201b94f64a280ef3"} Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.660527 4925 scope.go:117] "RemoveContainer" containerID="01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.660687 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jwkpm" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.692410 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jwkpm"] Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.709575 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jwkpm"] Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.713134 4925 scope.go:117] "RemoveContainer" containerID="5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.737610 4925 scope.go:117] "RemoveContainer" containerID="c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.773916 4925 scope.go:117] "RemoveContainer" containerID="01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4" Jan 21 11:44:31 crc kubenswrapper[4925]: E0121 11:44:31.774810 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4\": container with ID starting with 01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4 not found: ID does not exist" containerID="01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.774853 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4"} err="failed to get container status \"01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4\": rpc error: code = NotFound desc = could not find container \"01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4\": container with ID starting with 01fb47119b3bd5ba0d29372804e9b82b75a0fe7d00682ce0ca06e45f507c07a4 not found: ID does not exist" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.774880 4925 scope.go:117] "RemoveContainer" containerID="5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5" Jan 21 11:44:31 crc kubenswrapper[4925]: E0121 11:44:31.775161 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5\": container with ID starting with 5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5 not found: ID does not exist" containerID="5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.775190 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5"} err="failed to get container status \"5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5\": rpc error: code = NotFound desc = could not find container \"5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5\": container with ID starting with 5af2fd2a62a80d2c74fad3f184749b5762dd5e1e8d37ceb6e9f442126417ade5 not found: ID does not exist" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.775205 4925 scope.go:117] "RemoveContainer" containerID="c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79" Jan 21 11:44:31 crc kubenswrapper[4925]: E0121 11:44:31.776166 4925 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79\": container with ID starting with c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79 not found: ID does not exist" containerID="c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79" Jan 21 11:44:31 crc kubenswrapper[4925]: I0121 11:44:31.776196 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79"} err="failed to get container status \"c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79\": rpc error: code = NotFound desc = could not find container \"c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79\": container with ID starting with c9690c441a32b11a067cd806da054928d22ed1b6188df10eb638c789f9c77c79 not found: ID does not exist" Jan 21 11:44:33 crc kubenswrapper[4925]: I0121 11:44:33.513055 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" path="/var/lib/kubelet/pods/67b15615-10ec-4bdb-b241-c82d5cba850f/volumes" Jan 21 11:44:33 crc kubenswrapper[4925]: I0121 11:44:33.557676 4925 scope.go:117] "RemoveContainer" containerID="648418b220e1e235e2049d99914bc98ac17d884fb07a3e47ad72eb36b6fec672" Jan 21 11:44:33 crc kubenswrapper[4925]: I0121 11:44:33.613566 4925 scope.go:117] "RemoveContainer" containerID="e77362fc75425c5d7bd8f945869d7962a047b58e56f348e94279ef1e78a5fde0" Jan 21 11:44:49 crc kubenswrapper[4925]: I0121 11:44:49.941176 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:44:49 crc kubenswrapper[4925]: I0121 11:44:49.941895 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:44:49 crc kubenswrapper[4925]: I0121 11:44:49.941978 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:44:49 crc kubenswrapper[4925]: I0121 11:44:49.943291 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2529be439eb059316b87a73fdab3300524b65045be77bd9c5795f114c7d0f947"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:44:49 crc kubenswrapper[4925]: I0121 11:44:49.943372 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://2529be439eb059316b87a73fdab3300524b65045be77bd9c5795f114c7d0f947" gracePeriod=600 Jan 21 11:44:50 crc kubenswrapper[4925]: I0121 11:44:50.867025 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" 
containerID="2529be439eb059316b87a73fdab3300524b65045be77bd9c5795f114c7d0f947" exitCode=0 Jan 21 11:44:50 crc kubenswrapper[4925]: I0121 11:44:50.867700 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"2529be439eb059316b87a73fdab3300524b65045be77bd9c5795f114c7d0f947"} Jan 21 11:44:50 crc kubenswrapper[4925]: I0121 11:44:50.867752 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab"} Jan 21 11:44:50 crc kubenswrapper[4925]: I0121 11:44:50.867775 4925 scope.go:117] "RemoveContainer" containerID="9733a45707ce13e1d67996b8c7b5063de72b923a6f93d77c4240652a31b7331e" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.515290 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mlczm"] Jan 21 11:44:55 crc kubenswrapper[4925]: E0121 11:44:55.516360 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerName="extract-utilities" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.516377 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerName="extract-utilities" Jan 21 11:44:55 crc kubenswrapper[4925]: E0121 11:44:55.516429 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerName="registry-server" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.516439 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerName="registry-server" Jan 21 11:44:55 crc kubenswrapper[4925]: E0121 11:44:55.516452 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerName="extract-content" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.516460 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerName="extract-content" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.516685 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="67b15615-10ec-4bdb-b241-c82d5cba850f" containerName="registry-server" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.518335 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.521608 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mlczm"] Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.709298 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftchm\" (UniqueName: \"kubernetes.io/projected/87f9ef89-369b-4f72-8aa7-1cd10314258d-kube-api-access-ftchm\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.709521 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-catalog-content\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.709567 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-utilities\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.810880 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftchm\" (UniqueName: \"kubernetes.io/projected/87f9ef89-369b-4f72-8aa7-1cd10314258d-kube-api-access-ftchm\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.810970 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-catalog-content\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.810989 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-utilities\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.811561 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-utilities\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.811644 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-catalog-content\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.836412 4925 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ftchm\" (UniqueName: \"kubernetes.io/projected/87f9ef89-369b-4f72-8aa7-1cd10314258d-kube-api-access-ftchm\") pod \"redhat-marketplace-mlczm\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:55 crc kubenswrapper[4925]: I0121 11:44:55.839131 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:44:56 crc kubenswrapper[4925]: I0121 11:44:56.430580 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mlczm"] Jan 21 11:44:56 crc kubenswrapper[4925]: I0121 11:44:56.955623 4925 generic.go:334] "Generic (PLEG): container finished" podID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerID="2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643" exitCode=0 Jan 21 11:44:56 crc kubenswrapper[4925]: I0121 11:44:56.955714 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mlczm" event={"ID":"87f9ef89-369b-4f72-8aa7-1cd10314258d","Type":"ContainerDied","Data":"2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643"} Jan 21 11:44:56 crc kubenswrapper[4925]: I0121 11:44:56.955831 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mlczm" event={"ID":"87f9ef89-369b-4f72-8aa7-1cd10314258d","Type":"ContainerStarted","Data":"8706c3041b2bc84de7a219b0a540253dba6c585fccfa700525c153ccb54b2ae4"} Jan 21 11:44:57 crc kubenswrapper[4925]: I0121 11:44:57.964457 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mlczm" event={"ID":"87f9ef89-369b-4f72-8aa7-1cd10314258d","Type":"ContainerStarted","Data":"9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be"} Jan 21 11:44:58 crc kubenswrapper[4925]: I0121 11:44:58.978015 4925 generic.go:334] "Generic (PLEG): container finished" podID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerID="9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be" exitCode=0 Jan 21 11:44:58 crc kubenswrapper[4925]: I0121 11:44:58.978088 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mlczm" event={"ID":"87f9ef89-369b-4f72-8aa7-1cd10314258d","Type":"ContainerDied","Data":"9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be"} Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.001477 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mlczm" event={"ID":"87f9ef89-369b-4f72-8aa7-1cd10314258d","Type":"ContainerStarted","Data":"79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5"} Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.043734 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mlczm" podStartSLOduration=2.598327044 podStartE2EDuration="5.043686894s" podCreationTimestamp="2026-01-21 11:44:55 +0000 UTC" firstStartedPulling="2026-01-21 11:44:56.958713238 +0000 UTC m=+2988.562605162" lastFinishedPulling="2026-01-21 11:44:59.404073068 +0000 UTC m=+2991.007965012" observedRunningTime="2026-01-21 11:45:00.018704882 +0000 UTC m=+2991.622596816" watchObservedRunningTime="2026-01-21 11:45:00.043686894 +0000 UTC m=+2991.647578828" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.168416 4925 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z"] Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.170318 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.173818 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.174953 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.180901 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z"] Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.204103 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3325e422-dbe5-4937-a209-f3a477899616-secret-volume\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.204205 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjfcx\" (UniqueName: \"kubernetes.io/projected/3325e422-dbe5-4937-a209-f3a477899616-kube-api-access-pjfcx\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.204326 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3325e422-dbe5-4937-a209-f3a477899616-config-volume\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.305664 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3325e422-dbe5-4937-a209-f3a477899616-config-volume\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.306130 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3325e422-dbe5-4937-a209-f3a477899616-secret-volume\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.306314 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjfcx\" (UniqueName: \"kubernetes.io/projected/3325e422-dbe5-4937-a209-f3a477899616-kube-api-access-pjfcx\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.307064 
4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3325e422-dbe5-4937-a209-f3a477899616-config-volume\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.320935 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3325e422-dbe5-4937-a209-f3a477899616-secret-volume\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.331654 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjfcx\" (UniqueName: \"kubernetes.io/projected/3325e422-dbe5-4937-a209-f3a477899616-kube-api-access-pjfcx\") pod \"collect-profiles-29483265-mgz4z\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:00 crc kubenswrapper[4925]: I0121 11:45:00.498905 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:01 crc kubenswrapper[4925]: I0121 11:45:01.356311 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z"] Jan 21 11:45:02 crc kubenswrapper[4925]: I0121 11:45:02.028902 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" event={"ID":"3325e422-dbe5-4937-a209-f3a477899616","Type":"ContainerStarted","Data":"5ec1e6077c75836336a0bd4f7aa17c1502cfb0204b5b10426d5f62b776a7b930"} Jan 21 11:45:02 crc kubenswrapper[4925]: I0121 11:45:02.029181 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" event={"ID":"3325e422-dbe5-4937-a209-f3a477899616","Type":"ContainerStarted","Data":"e6fa6968fca25a1a2be89cf7405c7e410a3d2e3697353c7c652ca63bb5c9e9aa"} Jan 21 11:45:03 crc kubenswrapper[4925]: I0121 11:45:03.152927 4925 generic.go:334] "Generic (PLEG): container finished" podID="3325e422-dbe5-4937-a209-f3a477899616" containerID="5ec1e6077c75836336a0bd4f7aa17c1502cfb0204b5b10426d5f62b776a7b930" exitCode=0 Jan 21 11:45:03 crc kubenswrapper[4925]: I0121 11:45:03.152972 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" event={"ID":"3325e422-dbe5-4937-a209-f3a477899616","Type":"ContainerDied","Data":"5ec1e6077c75836336a0bd4f7aa17c1502cfb0204b5b10426d5f62b776a7b930"} Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.582875 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.688037 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3325e422-dbe5-4937-a209-f3a477899616-secret-volume\") pod \"3325e422-dbe5-4937-a209-f3a477899616\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.688230 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjfcx\" (UniqueName: \"kubernetes.io/projected/3325e422-dbe5-4937-a209-f3a477899616-kube-api-access-pjfcx\") pod \"3325e422-dbe5-4937-a209-f3a477899616\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.688272 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3325e422-dbe5-4937-a209-f3a477899616-config-volume\") pod \"3325e422-dbe5-4937-a209-f3a477899616\" (UID: \"3325e422-dbe5-4937-a209-f3a477899616\") " Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.689062 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3325e422-dbe5-4937-a209-f3a477899616-config-volume" (OuterVolumeSpecName: "config-volume") pod "3325e422-dbe5-4937-a209-f3a477899616" (UID: "3325e422-dbe5-4937-a209-f3a477899616"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.689635 4925 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3325e422-dbe5-4937-a209-f3a477899616-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.695793 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3325e422-dbe5-4937-a209-f3a477899616-kube-api-access-pjfcx" (OuterVolumeSpecName: "kube-api-access-pjfcx") pod "3325e422-dbe5-4937-a209-f3a477899616" (UID: "3325e422-dbe5-4937-a209-f3a477899616"). InnerVolumeSpecName "kube-api-access-pjfcx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.705272 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3325e422-dbe5-4937-a209-f3a477899616-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3325e422-dbe5-4937-a209-f3a477899616" (UID: "3325e422-dbe5-4937-a209-f3a477899616"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.791498 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjfcx\" (UniqueName: \"kubernetes.io/projected/3325e422-dbe5-4937-a209-f3a477899616-kube-api-access-pjfcx\") on node \"crc\" DevicePath \"\"" Jan 21 11:45:04 crc kubenswrapper[4925]: I0121 11:45:04.791547 4925 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3325e422-dbe5-4937-a209-f3a477899616-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 11:45:05 crc kubenswrapper[4925]: I0121 11:45:05.290104 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" event={"ID":"3325e422-dbe5-4937-a209-f3a477899616","Type":"ContainerDied","Data":"e6fa6968fca25a1a2be89cf7405c7e410a3d2e3697353c7c652ca63bb5c9e9aa"} Jan 21 11:45:05 crc kubenswrapper[4925]: I0121 11:45:05.290144 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6fa6968fca25a1a2be89cf7405c7e410a3d2e3697353c7c652ca63bb5c9e9aa" Jan 21 11:45:05 crc kubenswrapper[4925]: I0121 11:45:05.290206 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483265-mgz4z" Jan 21 11:45:05 crc kubenswrapper[4925]: I0121 11:45:05.693786 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp"] Jan 21 11:45:05 crc kubenswrapper[4925]: I0121 11:45:05.704187 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483220-hx8hp"] Jan 21 11:45:05 crc kubenswrapper[4925]: I0121 11:45:05.840116 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:45:05 crc kubenswrapper[4925]: I0121 11:45:05.840189 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:45:05 crc kubenswrapper[4925]: I0121 11:45:05.929566 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:45:06 crc kubenswrapper[4925]: I0121 11:45:06.356685 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:45:07 crc kubenswrapper[4925]: I0121 11:45:07.632008 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50f7361b-c2aa-49d1-8300-88ccc99af201" path="/var/lib/kubelet/pods/50f7361b-c2aa-49d1-8300-88ccc99af201/volumes" Jan 21 11:45:09 crc kubenswrapper[4925]: I0121 11:45:09.493717 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mlczm"] Jan 21 11:45:09 crc kubenswrapper[4925]: I0121 11:45:09.495234 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mlczm" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerName="registry-server" containerID="cri-o://79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5" gracePeriod=2 Jan 21 11:45:09 crc kubenswrapper[4925]: I0121 11:45:09.990023 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.219944 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-catalog-content\") pod \"87f9ef89-369b-4f72-8aa7-1cd10314258d\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.220328 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftchm\" (UniqueName: \"kubernetes.io/projected/87f9ef89-369b-4f72-8aa7-1cd10314258d-kube-api-access-ftchm\") pod \"87f9ef89-369b-4f72-8aa7-1cd10314258d\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.220531 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-utilities\") pod \"87f9ef89-369b-4f72-8aa7-1cd10314258d\" (UID: \"87f9ef89-369b-4f72-8aa7-1cd10314258d\") " Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.221700 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-utilities" (OuterVolumeSpecName: "utilities") pod "87f9ef89-369b-4f72-8aa7-1cd10314258d" (UID: "87f9ef89-369b-4f72-8aa7-1cd10314258d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.228158 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87f9ef89-369b-4f72-8aa7-1cd10314258d-kube-api-access-ftchm" (OuterVolumeSpecName: "kube-api-access-ftchm") pod "87f9ef89-369b-4f72-8aa7-1cd10314258d" (UID: "87f9ef89-369b-4f72-8aa7-1cd10314258d"). InnerVolumeSpecName "kube-api-access-ftchm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.254168 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87f9ef89-369b-4f72-8aa7-1cd10314258d" (UID: "87f9ef89-369b-4f72-8aa7-1cd10314258d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.322722 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.322830 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftchm\" (UniqueName: \"kubernetes.io/projected/87f9ef89-369b-4f72-8aa7-1cd10314258d-kube-api-access-ftchm\") on node \"crc\" DevicePath \"\"" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.322848 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87f9ef89-369b-4f72-8aa7-1cd10314258d-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.349576 4925 generic.go:334] "Generic (PLEG): container finished" podID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerID="79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5" exitCode=0 Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.349621 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mlczm" event={"ID":"87f9ef89-369b-4f72-8aa7-1cd10314258d","Type":"ContainerDied","Data":"79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5"} Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.349654 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mlczm" event={"ID":"87f9ef89-369b-4f72-8aa7-1cd10314258d","Type":"ContainerDied","Data":"8706c3041b2bc84de7a219b0a540253dba6c585fccfa700525c153ccb54b2ae4"} Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.349671 4925 scope.go:117] "RemoveContainer" containerID="79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.349714 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mlczm" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.383346 4925 scope.go:117] "RemoveContainer" containerID="9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.385039 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mlczm"] Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.397384 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mlczm"] Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.407473 4925 scope.go:117] "RemoveContainer" containerID="2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.451342 4925 scope.go:117] "RemoveContainer" containerID="79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5" Jan 21 11:45:10 crc kubenswrapper[4925]: E0121 11:45:10.451945 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5\": container with ID starting with 79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5 not found: ID does not exist" containerID="79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.451979 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5"} err="failed to get container status \"79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5\": rpc error: code = NotFound desc = could not find container \"79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5\": container with ID starting with 79a5e5d01587ea9cda32e80e81ef6b5adfef986c01d27cbc4bc65559f4b429b5 not found: ID does not exist" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.452002 4925 scope.go:117] "RemoveContainer" containerID="9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be" Jan 21 11:45:10 crc kubenswrapper[4925]: E0121 11:45:10.452361 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be\": container with ID starting with 9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be not found: ID does not exist" containerID="9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.452427 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be"} err="failed to get container status \"9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be\": rpc error: code = NotFound desc = could not find container \"9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be\": container with ID starting with 9ac01f880a25070f9c18a47d92562631ecb8d116011fa99cf455e2e868a1d2be not found: ID does not exist" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.452458 4925 scope.go:117] "RemoveContainer" containerID="2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643" Jan 21 11:45:10 crc kubenswrapper[4925]: E0121 11:45:10.452821 4925 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643\": container with ID starting with 2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643 not found: ID does not exist" containerID="2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643" Jan 21 11:45:10 crc kubenswrapper[4925]: I0121 11:45:10.452894 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643"} err="failed to get container status \"2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643\": rpc error: code = NotFound desc = could not find container \"2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643\": container with ID starting with 2bbf9db7709ac98f41f601cdfe429c23eadd0f105a2e0ad966b76bff2c311643 not found: ID does not exist" Jan 21 11:45:11 crc kubenswrapper[4925]: I0121 11:45:11.511773 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" path="/var/lib/kubelet/pods/87f9ef89-369b-4f72-8aa7-1cd10314258d/volumes" Jan 21 11:45:33 crc kubenswrapper[4925]: I0121 11:45:33.702244 4925 scope.go:117] "RemoveContainer" containerID="d43006dfb4bcda468e86fda26ee219adf1b78ffafa2f5c1dd431af6708c79fe7" Jan 21 11:47:19 crc kubenswrapper[4925]: I0121 11:47:19.941636 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:47:19 crc kubenswrapper[4925]: I0121 11:47:19.942284 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:47:49 crc kubenswrapper[4925]: I0121 11:47:49.940926 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:47:49 crc kubenswrapper[4925]: I0121 11:47:49.941612 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:48:19 crc kubenswrapper[4925]: I0121 11:48:19.940877 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:48:19 crc kubenswrapper[4925]: I0121 11:48:19.943722 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:48:19 crc kubenswrapper[4925]: I0121 11:48:19.943945 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:48:19 crc kubenswrapper[4925]: I0121 11:48:19.944935 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:48:19 crc kubenswrapper[4925]: I0121 11:48:19.945643 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" gracePeriod=600 Jan 21 11:48:20 crc kubenswrapper[4925]: E0121 11:48:20.084542 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:48:20 crc kubenswrapper[4925]: I0121 11:48:20.519235 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" exitCode=0 Jan 21 11:48:20 crc kubenswrapper[4925]: I0121 11:48:20.519298 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab"} Jan 21 11:48:20 crc kubenswrapper[4925]: I0121 11:48:20.519375 4925 scope.go:117] "RemoveContainer" containerID="2529be439eb059316b87a73fdab3300524b65045be77bd9c5795f114c7d0f947" Jan 21 11:48:20 crc kubenswrapper[4925]: I0121 11:48:20.520075 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:48:20 crc kubenswrapper[4925]: E0121 11:48:20.520507 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:48:32 crc kubenswrapper[4925]: I0121 11:48:32.502258 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:48:32 crc kubenswrapper[4925]: E0121 11:48:32.504625 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:48:44 crc kubenswrapper[4925]: I0121 11:48:44.502224 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:48:44 crc kubenswrapper[4925]: E0121 11:48:44.503179 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:48:55 crc kubenswrapper[4925]: I0121 11:48:55.505244 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:48:55 crc kubenswrapper[4925]: E0121 11:48:55.505917 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:49:07 crc kubenswrapper[4925]: I0121 11:49:07.502492 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:49:07 crc kubenswrapper[4925]: E0121 11:49:07.503421 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:49:18 crc kubenswrapper[4925]: I0121 11:49:18.502695 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:49:18 crc kubenswrapper[4925]: E0121 11:49:18.504235 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:49:32 crc kubenswrapper[4925]: I0121 11:49:32.502603 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:49:32 crc kubenswrapper[4925]: E0121 11:49:32.503485 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" 
podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:49:44 crc kubenswrapper[4925]: I0121 11:49:44.502975 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:49:44 crc kubenswrapper[4925]: E0121 11:49:44.503633 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:49:58 crc kubenswrapper[4925]: I0121 11:49:58.501444 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:49:58 crc kubenswrapper[4925]: E0121 11:49:58.502218 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:50:13 crc kubenswrapper[4925]: I0121 11:50:13.501845 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:50:13 crc kubenswrapper[4925]: E0121 11:50:13.502499 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:50:26 crc kubenswrapper[4925]: I0121 11:50:26.502892 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:50:26 crc kubenswrapper[4925]: E0121 11:50:26.503830 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:50:38 crc kubenswrapper[4925]: I0121 11:50:38.502495 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:50:38 crc kubenswrapper[4925]: E0121 11:50:38.503207 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:50:49 crc kubenswrapper[4925]: I0121 11:50:49.722213 4925 scope.go:117] "RemoveContainer" 
containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:50:49 crc kubenswrapper[4925]: E0121 11:50:49.723320 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:51:00 crc kubenswrapper[4925]: I0121 11:51:00.502370 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:51:00 crc kubenswrapper[4925]: E0121 11:51:00.503212 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:51:15 crc kubenswrapper[4925]: I0121 11:51:15.516360 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:51:15 crc kubenswrapper[4925]: E0121 11:51:15.517048 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:51:29 crc kubenswrapper[4925]: I0121 11:51:29.508006 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:51:29 crc kubenswrapper[4925]: E0121 11:51:29.508941 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:51:40 crc kubenswrapper[4925]: I0121 11:51:40.502811 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:51:40 crc kubenswrapper[4925]: E0121 11:51:40.503902 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:51:51 crc kubenswrapper[4925]: I0121 11:51:51.502342 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:51:51 crc kubenswrapper[4925]: E0121 11:51:51.503460 4925 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:52:06 crc kubenswrapper[4925]: I0121 11:52:06.501575 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:52:06 crc kubenswrapper[4925]: E0121 11:52:06.502569 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:52:21 crc kubenswrapper[4925]: I0121 11:52:21.503439 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:52:21 crc kubenswrapper[4925]: E0121 11:52:21.504965 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:52:34 crc kubenswrapper[4925]: I0121 11:52:34.503774 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:52:34 crc kubenswrapper[4925]: E0121 11:52:34.504890 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:52:45 crc kubenswrapper[4925]: I0121 11:52:45.502381 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:52:45 crc kubenswrapper[4925]: E0121 11:52:45.503199 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:52:57 crc kubenswrapper[4925]: I0121 11:52:57.507494 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:52:57 crc kubenswrapper[4925]: E0121 11:52:57.508346 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:53:11 crc kubenswrapper[4925]: I0121 11:53:11.502018 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:53:11 crc kubenswrapper[4925]: E0121 11:53:11.502883 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 11:53:26 crc kubenswrapper[4925]: I0121 11:53:26.502472 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:53:27 crc kubenswrapper[4925]: I0121 11:53:27.140967 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"bb30684795a391256ad4ef320afe2396f497f78bec3c7653ede220c3029ce93b"} Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.659351 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p72m7"] Jan 21 11:54:06 crc kubenswrapper[4925]: E0121 11:54:06.660360 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3325e422-dbe5-4937-a209-f3a477899616" containerName="collect-profiles" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.660378 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3325e422-dbe5-4937-a209-f3a477899616" containerName="collect-profiles" Jan 21 11:54:06 crc kubenswrapper[4925]: E0121 11:54:06.663116 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerName="registry-server" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.663146 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerName="registry-server" Jan 21 11:54:06 crc kubenswrapper[4925]: E0121 11:54:06.663169 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerName="extract-utilities" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.663179 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerName="extract-utilities" Jan 21 11:54:06 crc kubenswrapper[4925]: E0121 11:54:06.663207 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerName="extract-content" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.663234 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerName="extract-content" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.663561 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="87f9ef89-369b-4f72-8aa7-1cd10314258d" containerName="registry-server" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.663605 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="3325e422-dbe5-4937-a209-f3a477899616" 
containerName="collect-profiles" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.665481 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.680665 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p72m7"] Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.858234 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-utilities\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.858312 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-catalog-content\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.858772 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppwtp\" (UniqueName: \"kubernetes.io/projected/5b3bb4bd-7927-4afa-b7cb-35199e80726f-kube-api-access-ppwtp\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.961192 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppwtp\" (UniqueName: \"kubernetes.io/projected/5b3bb4bd-7927-4afa-b7cb-35199e80726f-kube-api-access-ppwtp\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.961437 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-utilities\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.961480 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-catalog-content\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.962051 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-utilities\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.962127 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-catalog-content\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " 
pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:06 crc kubenswrapper[4925]: I0121 11:54:06.985307 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppwtp\" (UniqueName: \"kubernetes.io/projected/5b3bb4bd-7927-4afa-b7cb-35199e80726f-kube-api-access-ppwtp\") pod \"certified-operators-p72m7\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:07 crc kubenswrapper[4925]: I0121 11:54:07.286140 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:07 crc kubenswrapper[4925]: I0121 11:54:07.931927 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p72m7"] Jan 21 11:54:08 crc kubenswrapper[4925]: I0121 11:54:08.012336 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p72m7" event={"ID":"5b3bb4bd-7927-4afa-b7cb-35199e80726f","Type":"ContainerStarted","Data":"ba33c0d694ccfee6853d1cd086eeb895450e64d2c223cd528057c000054e22fd"} Jan 21 11:54:09 crc kubenswrapper[4925]: I0121 11:54:09.025859 4925 generic.go:334] "Generic (PLEG): container finished" podID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerID="0dba0e2dfc9d3be3b4e4f99bae7fc92ba1814eec74d1fb2d9e70d30123878dc5" exitCode=0 Jan 21 11:54:09 crc kubenswrapper[4925]: I0121 11:54:09.025980 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p72m7" event={"ID":"5b3bb4bd-7927-4afa-b7cb-35199e80726f","Type":"ContainerDied","Data":"0dba0e2dfc9d3be3b4e4f99bae7fc92ba1814eec74d1fb2d9e70d30123878dc5"} Jan 21 11:54:09 crc kubenswrapper[4925]: I0121 11:54:09.029459 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 11:54:11 crc kubenswrapper[4925]: I0121 11:54:11.051787 4925 generic.go:334] "Generic (PLEG): container finished" podID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerID="e08daf71ee5dd942d2cb49aebbcdec27c20d2934e56d12d099e4798790a55d09" exitCode=0 Jan 21 11:54:11 crc kubenswrapper[4925]: I0121 11:54:11.051878 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p72m7" event={"ID":"5b3bb4bd-7927-4afa-b7cb-35199e80726f","Type":"ContainerDied","Data":"e08daf71ee5dd942d2cb49aebbcdec27c20d2934e56d12d099e4798790a55d09"} Jan 21 11:54:12 crc kubenswrapper[4925]: I0121 11:54:12.062294 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p72m7" event={"ID":"5b3bb4bd-7927-4afa-b7cb-35199e80726f","Type":"ContainerStarted","Data":"d1443e77c92dc01d52cb177e87559986ef5f3c49fd640c51f4fd61ac79c1c8e2"} Jan 21 11:54:12 crc kubenswrapper[4925]: I0121 11:54:12.092540 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p72m7" podStartSLOduration=3.635052775 podStartE2EDuration="6.092493311s" podCreationTimestamp="2026-01-21 11:54:06 +0000 UTC" firstStartedPulling="2026-01-21 11:54:09.028724975 +0000 UTC m=+3540.632616939" lastFinishedPulling="2026-01-21 11:54:11.486165541 +0000 UTC m=+3543.090057475" observedRunningTime="2026-01-21 11:54:12.088232815 +0000 UTC m=+3543.692124779" watchObservedRunningTime="2026-01-21 11:54:12.092493311 +0000 UTC m=+3543.696385255" Jan 21 11:54:17 crc kubenswrapper[4925]: I0121 11:54:17.286470 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:17 crc kubenswrapper[4925]: I0121 11:54:17.287565 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:17 crc kubenswrapper[4925]: I0121 11:54:17.362754 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:18 crc kubenswrapper[4925]: I0121 11:54:18.209067 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:21 crc kubenswrapper[4925]: I0121 11:54:21.539004 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p72m7"] Jan 21 11:54:21 crc kubenswrapper[4925]: I0121 11:54:21.539815 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p72m7" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerName="registry-server" containerID="cri-o://d1443e77c92dc01d52cb177e87559986ef5f3c49fd640c51f4fd61ac79c1c8e2" gracePeriod=2 Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.189739 4925 generic.go:334] "Generic (PLEG): container finished" podID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerID="d1443e77c92dc01d52cb177e87559986ef5f3c49fd640c51f4fd61ac79c1c8e2" exitCode=0 Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.189808 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p72m7" event={"ID":"5b3bb4bd-7927-4afa-b7cb-35199e80726f","Type":"ContainerDied","Data":"d1443e77c92dc01d52cb177e87559986ef5f3c49fd640c51f4fd61ac79c1c8e2"} Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.534439 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.703211 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-utilities\") pod \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.703316 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-catalog-content\") pod \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.703476 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppwtp\" (UniqueName: \"kubernetes.io/projected/5b3bb4bd-7927-4afa-b7cb-35199e80726f-kube-api-access-ppwtp\") pod \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\" (UID: \"5b3bb4bd-7927-4afa-b7cb-35199e80726f\") " Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.704490 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-utilities" (OuterVolumeSpecName: "utilities") pod "5b3bb4bd-7927-4afa-b7cb-35199e80726f" (UID: "5b3bb4bd-7927-4afa-b7cb-35199e80726f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.734909 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b3bb4bd-7927-4afa-b7cb-35199e80726f-kube-api-access-ppwtp" (OuterVolumeSpecName: "kube-api-access-ppwtp") pod "5b3bb4bd-7927-4afa-b7cb-35199e80726f" (UID: "5b3bb4bd-7927-4afa-b7cb-35199e80726f"). InnerVolumeSpecName "kube-api-access-ppwtp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.757786 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b3bb4bd-7927-4afa-b7cb-35199e80726f" (UID: "5b3bb4bd-7927-4afa-b7cb-35199e80726f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.805992 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.806049 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3bb4bd-7927-4afa-b7cb-35199e80726f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:22 crc kubenswrapper[4925]: I0121 11:54:22.806065 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppwtp\" (UniqueName: \"kubernetes.io/projected/5b3bb4bd-7927-4afa-b7cb-35199e80726f-kube-api-access-ppwtp\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:23 crc kubenswrapper[4925]: I0121 11:54:23.208120 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p72m7" event={"ID":"5b3bb4bd-7927-4afa-b7cb-35199e80726f","Type":"ContainerDied","Data":"ba33c0d694ccfee6853d1cd086eeb895450e64d2c223cd528057c000054e22fd"} Jan 21 11:54:23 crc kubenswrapper[4925]: I0121 11:54:23.208217 4925 scope.go:117] "RemoveContainer" containerID="d1443e77c92dc01d52cb177e87559986ef5f3c49fd640c51f4fd61ac79c1c8e2" Jan 21 11:54:23 crc kubenswrapper[4925]: I0121 11:54:23.208224 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p72m7" Jan 21 11:54:23 crc kubenswrapper[4925]: I0121 11:54:23.261608 4925 scope.go:117] "RemoveContainer" containerID="e08daf71ee5dd942d2cb49aebbcdec27c20d2934e56d12d099e4798790a55d09" Jan 21 11:54:23 crc kubenswrapper[4925]: I0121 11:54:23.277516 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p72m7"] Jan 21 11:54:23 crc kubenswrapper[4925]: I0121 11:54:23.283531 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p72m7"] Jan 21 11:54:23 crc kubenswrapper[4925]: I0121 11:54:23.295952 4925 scope.go:117] "RemoveContainer" containerID="0dba0e2dfc9d3be3b4e4f99bae7fc92ba1814eec74d1fb2d9e70d30123878dc5" Jan 21 11:54:23 crc kubenswrapper[4925]: I0121 11:54:23.513882 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" path="/var/lib/kubelet/pods/5b3bb4bd-7927-4afa-b7cb-35199e80726f/volumes" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.348366 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bsdq4"] Jan 21 11:54:27 crc kubenswrapper[4925]: E0121 11:54:27.349074 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerName="registry-server" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.349090 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerName="registry-server" Jan 21 11:54:27 crc kubenswrapper[4925]: E0121 11:54:27.349116 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerName="extract-content" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.349122 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerName="extract-content" Jan 21 11:54:27 crc kubenswrapper[4925]: E0121 11:54:27.349132 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerName="extract-utilities" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.349138 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerName="extract-utilities" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.349292 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b3bb4bd-7927-4afa-b7cb-35199e80726f" containerName="registry-server" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.350574 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.375736 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bsdq4"] Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.520292 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-utilities\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.520375 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-catalog-content\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.520662 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bf9z6\" (UniqueName: \"kubernetes.io/projected/f5445095-aa3e-4a89-878f-2fafbc94cf18-kube-api-access-bf9z6\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.622989 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-utilities\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.623097 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-catalog-content\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.623159 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bf9z6\" (UniqueName: \"kubernetes.io/projected/f5445095-aa3e-4a89-878f-2fafbc94cf18-kube-api-access-bf9z6\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.623607 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-utilities\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.624016 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-catalog-content\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.657366 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bf9z6\" (UniqueName: \"kubernetes.io/projected/f5445095-aa3e-4a89-878f-2fafbc94cf18-kube-api-access-bf9z6\") pod \"redhat-operators-bsdq4\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.669185 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.963272 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2w5cr"] Jan 21 11:54:27 crc kubenswrapper[4925]: I0121 11:54:27.965495 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.003206 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2w5cr"] Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.167415 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p67tm\" (UniqueName: \"kubernetes.io/projected/55da4230-45df-4f91-b516-54393219e8b8-kube-api-access-p67tm\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.167717 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-catalog-content\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.167758 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-utilities\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.269469 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p67tm\" (UniqueName: \"kubernetes.io/projected/55da4230-45df-4f91-b516-54393219e8b8-kube-api-access-p67tm\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.269578 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-catalog-content\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.269719 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-utilities\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.270349 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-catalog-content\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.270379 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-utilities\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.294811 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p67tm\" (UniqueName: \"kubernetes.io/projected/55da4230-45df-4f91-b516-54393219e8b8-kube-api-access-p67tm\") pod \"community-operators-2w5cr\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.308973 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.372098 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bsdq4"] Jan 21 11:54:28 crc kubenswrapper[4925]: W0121 11:54:28.379062 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5445095_aa3e_4a89_878f_2fafbc94cf18.slice/crio-2411a9171daf293f8cb00763060189539eef37ad520bdd088a5df04d91484b4c WatchSource:0}: Error finding container 2411a9171daf293f8cb00763060189539eef37ad520bdd088a5df04d91484b4c: Status 404 returned error can't find the container with id 2411a9171daf293f8cb00763060189539eef37ad520bdd088a5df04d91484b4c Jan 21 11:54:28 crc kubenswrapper[4925]: I0121 11:54:28.645900 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2w5cr"] Jan 21 11:54:29 crc kubenswrapper[4925]: I0121 11:54:29.265353 4925 generic.go:334] "Generic (PLEG): container finished" podID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerID="38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff" exitCode=0 Jan 21 11:54:29 crc kubenswrapper[4925]: I0121 11:54:29.265618 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bsdq4" event={"ID":"f5445095-aa3e-4a89-878f-2fafbc94cf18","Type":"ContainerDied","Data":"38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff"} Jan 21 11:54:29 crc kubenswrapper[4925]: I0121 11:54:29.265647 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bsdq4" event={"ID":"f5445095-aa3e-4a89-878f-2fafbc94cf18","Type":"ContainerStarted","Data":"2411a9171daf293f8cb00763060189539eef37ad520bdd088a5df04d91484b4c"} Jan 21 11:54:29 crc kubenswrapper[4925]: I0121 11:54:29.275749 4925 generic.go:334] "Generic (PLEG): container finished" podID="55da4230-45df-4f91-b516-54393219e8b8" containerID="37c8aeb9de4297eb873a44701690d2f832c65266592d5d4760b0efecf684e45e" exitCode=0 Jan 21 11:54:29 crc kubenswrapper[4925]: I0121 11:54:29.275798 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w5cr" 
event={"ID":"55da4230-45df-4f91-b516-54393219e8b8","Type":"ContainerDied","Data":"37c8aeb9de4297eb873a44701690d2f832c65266592d5d4760b0efecf684e45e"} Jan 21 11:54:29 crc kubenswrapper[4925]: I0121 11:54:29.275823 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w5cr" event={"ID":"55da4230-45df-4f91-b516-54393219e8b8","Type":"ContainerStarted","Data":"045238a79bd8ccc256b79df53ff27ebe6c4f1d87c8d9e5dde770500c21b30393"} Jan 21 11:54:30 crc kubenswrapper[4925]: I0121 11:54:30.289477 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w5cr" event={"ID":"55da4230-45df-4f91-b516-54393219e8b8","Type":"ContainerStarted","Data":"f50a9e194a50645746ad89acf58132b2a5c6ceb9530075b797e5828c7163d151"} Jan 21 11:54:31 crc kubenswrapper[4925]: I0121 11:54:31.301214 4925 generic.go:334] "Generic (PLEG): container finished" podID="55da4230-45df-4f91-b516-54393219e8b8" containerID="f50a9e194a50645746ad89acf58132b2a5c6ceb9530075b797e5828c7163d151" exitCode=0 Jan 21 11:54:31 crc kubenswrapper[4925]: I0121 11:54:31.301270 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w5cr" event={"ID":"55da4230-45df-4f91-b516-54393219e8b8","Type":"ContainerDied","Data":"f50a9e194a50645746ad89acf58132b2a5c6ceb9530075b797e5828c7163d151"} Jan 21 11:54:31 crc kubenswrapper[4925]: I0121 11:54:31.305617 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bsdq4" event={"ID":"f5445095-aa3e-4a89-878f-2fafbc94cf18","Type":"ContainerStarted","Data":"7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71"} Jan 21 11:54:32 crc kubenswrapper[4925]: I0121 11:54:32.332806 4925 generic.go:334] "Generic (PLEG): container finished" podID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerID="7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71" exitCode=0 Jan 21 11:54:32 crc kubenswrapper[4925]: I0121 11:54:32.332861 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bsdq4" event={"ID":"f5445095-aa3e-4a89-878f-2fafbc94cf18","Type":"ContainerDied","Data":"7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71"} Jan 21 11:54:32 crc kubenswrapper[4925]: I0121 11:54:32.336046 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w5cr" event={"ID":"55da4230-45df-4f91-b516-54393219e8b8","Type":"ContainerStarted","Data":"e45f6e86b4de8f065251a851768e1e32eb852b7d0bd191c2f6e18ddb3d620c02"} Jan 21 11:54:32 crc kubenswrapper[4925]: I0121 11:54:32.387341 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2w5cr" podStartSLOduration=2.8681222699999998 podStartE2EDuration="5.387304831s" podCreationTimestamp="2026-01-21 11:54:27 +0000 UTC" firstStartedPulling="2026-01-21 11:54:29.277541112 +0000 UTC m=+3560.881433046" lastFinishedPulling="2026-01-21 11:54:31.796723633 +0000 UTC m=+3563.400615607" observedRunningTime="2026-01-21 11:54:32.375602088 +0000 UTC m=+3563.979494032" watchObservedRunningTime="2026-01-21 11:54:32.387304831 +0000 UTC m=+3563.991196805" Jan 21 11:54:33 crc kubenswrapper[4925]: I0121 11:54:33.348481 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bsdq4" 
event={"ID":"f5445095-aa3e-4a89-878f-2fafbc94cf18","Type":"ContainerStarted","Data":"393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36"} Jan 21 11:54:33 crc kubenswrapper[4925]: I0121 11:54:33.380135 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bsdq4" podStartSLOduration=2.901549943 podStartE2EDuration="6.380115907s" podCreationTimestamp="2026-01-21 11:54:27 +0000 UTC" firstStartedPulling="2026-01-21 11:54:29.267317108 +0000 UTC m=+3560.871209042" lastFinishedPulling="2026-01-21 11:54:32.745883052 +0000 UTC m=+3564.349775006" observedRunningTime="2026-01-21 11:54:33.375368567 +0000 UTC m=+3564.979260511" watchObservedRunningTime="2026-01-21 11:54:33.380115907 +0000 UTC m=+3564.984007841" Jan 21 11:54:37 crc kubenswrapper[4925]: I0121 11:54:37.669378 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:37 crc kubenswrapper[4925]: I0121 11:54:37.671340 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:38 crc kubenswrapper[4925]: I0121 11:54:38.309981 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:38 crc kubenswrapper[4925]: I0121 11:54:38.311398 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:38 crc kubenswrapper[4925]: I0121 11:54:38.391345 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:38 crc kubenswrapper[4925]: I0121 11:54:38.618423 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:38 crc kubenswrapper[4925]: I0121 11:54:38.725569 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bsdq4" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="registry-server" probeResult="failure" output=< Jan 21 11:54:38 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 11:54:38 crc kubenswrapper[4925]: > Jan 21 11:54:40 crc kubenswrapper[4925]: I0121 11:54:40.181128 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2w5cr"] Jan 21 11:54:41 crc kubenswrapper[4925]: I0121 11:54:41.608266 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2w5cr" podUID="55da4230-45df-4f91-b516-54393219e8b8" containerName="registry-server" containerID="cri-o://e45f6e86b4de8f065251a851768e1e32eb852b7d0bd191c2f6e18ddb3d620c02" gracePeriod=2 Jan 21 11:54:42 crc kubenswrapper[4925]: I0121 11:54:42.631947 4925 generic.go:334] "Generic (PLEG): container finished" podID="55da4230-45df-4f91-b516-54393219e8b8" containerID="e45f6e86b4de8f065251a851768e1e32eb852b7d0bd191c2f6e18ddb3d620c02" exitCode=0 Jan 21 11:54:42 crc kubenswrapper[4925]: I0121 11:54:42.631996 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w5cr" event={"ID":"55da4230-45df-4f91-b516-54393219e8b8","Type":"ContainerDied","Data":"e45f6e86b4de8f065251a851768e1e32eb852b7d0bd191c2f6e18ddb3d620c02"} Jan 21 11:54:42 crc kubenswrapper[4925]: I0121 11:54:42.992250 4925 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.102911 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-catalog-content\") pod \"55da4230-45df-4f91-b516-54393219e8b8\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.102960 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p67tm\" (UniqueName: \"kubernetes.io/projected/55da4230-45df-4f91-b516-54393219e8b8-kube-api-access-p67tm\") pod \"55da4230-45df-4f91-b516-54393219e8b8\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.103010 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-utilities\") pod \"55da4230-45df-4f91-b516-54393219e8b8\" (UID: \"55da4230-45df-4f91-b516-54393219e8b8\") " Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.104052 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-utilities" (OuterVolumeSpecName: "utilities") pod "55da4230-45df-4f91-b516-54393219e8b8" (UID: "55da4230-45df-4f91-b516-54393219e8b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.109728 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55da4230-45df-4f91-b516-54393219e8b8-kube-api-access-p67tm" (OuterVolumeSpecName: "kube-api-access-p67tm") pod "55da4230-45df-4f91-b516-54393219e8b8" (UID: "55da4230-45df-4f91-b516-54393219e8b8"). InnerVolumeSpecName "kube-api-access-p67tm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.164626 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55da4230-45df-4f91-b516-54393219e8b8" (UID: "55da4230-45df-4f91-b516-54393219e8b8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.205506 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.205560 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55da4230-45df-4f91-b516-54393219e8b8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.205575 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p67tm\" (UniqueName: \"kubernetes.io/projected/55da4230-45df-4f91-b516-54393219e8b8-kube-api-access-p67tm\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.646205 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w5cr" event={"ID":"55da4230-45df-4f91-b516-54393219e8b8","Type":"ContainerDied","Data":"045238a79bd8ccc256b79df53ff27ebe6c4f1d87c8d9e5dde770500c21b30393"} Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.646250 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2w5cr" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.646277 4925 scope.go:117] "RemoveContainer" containerID="e45f6e86b4de8f065251a851768e1e32eb852b7d0bd191c2f6e18ddb3d620c02" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.671518 4925 scope.go:117] "RemoveContainer" containerID="f50a9e194a50645746ad89acf58132b2a5c6ceb9530075b797e5828c7163d151" Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.672607 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2w5cr"] Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.678990 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2w5cr"] Jan 21 11:54:43 crc kubenswrapper[4925]: I0121 11:54:43.702360 4925 scope.go:117] "RemoveContainer" containerID="37c8aeb9de4297eb873a44701690d2f832c65266592d5d4760b0efecf684e45e" Jan 21 11:54:45 crc kubenswrapper[4925]: I0121 11:54:45.515173 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55da4230-45df-4f91-b516-54393219e8b8" path="/var/lib/kubelet/pods/55da4230-45df-4f91-b516-54393219e8b8/volumes" Jan 21 11:54:47 crc kubenswrapper[4925]: I0121 11:54:47.723481 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:47 crc kubenswrapper[4925]: I0121 11:54:47.779144 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:51 crc kubenswrapper[4925]: I0121 11:54:51.345360 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bsdq4"] Jan 21 11:54:51 crc kubenswrapper[4925]: I0121 11:54:51.346457 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bsdq4" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="registry-server" containerID="cri-o://393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36" gracePeriod=2 Jan 21 11:54:51 crc kubenswrapper[4925]: I0121 11:54:51.869658 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.046515 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf9z6\" (UniqueName: \"kubernetes.io/projected/f5445095-aa3e-4a89-878f-2fafbc94cf18-kube-api-access-bf9z6\") pod \"f5445095-aa3e-4a89-878f-2fafbc94cf18\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.046754 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-catalog-content\") pod \"f5445095-aa3e-4a89-878f-2fafbc94cf18\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.046831 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-utilities\") pod \"f5445095-aa3e-4a89-878f-2fafbc94cf18\" (UID: \"f5445095-aa3e-4a89-878f-2fafbc94cf18\") " Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.048486 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-utilities" (OuterVolumeSpecName: "utilities") pod "f5445095-aa3e-4a89-878f-2fafbc94cf18" (UID: "f5445095-aa3e-4a89-878f-2fafbc94cf18"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.073038 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5445095-aa3e-4a89-878f-2fafbc94cf18-kube-api-access-bf9z6" (OuterVolumeSpecName: "kube-api-access-bf9z6") pod "f5445095-aa3e-4a89-878f-2fafbc94cf18" (UID: "f5445095-aa3e-4a89-878f-2fafbc94cf18"). InnerVolumeSpecName "kube-api-access-bf9z6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.107483 4925 generic.go:334] "Generic (PLEG): container finished" podID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerID="393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36" exitCode=0 Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.107539 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bsdq4" event={"ID":"f5445095-aa3e-4a89-878f-2fafbc94cf18","Type":"ContainerDied","Data":"393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36"} Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.107632 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bsdq4" event={"ID":"f5445095-aa3e-4a89-878f-2fafbc94cf18","Type":"ContainerDied","Data":"2411a9171daf293f8cb00763060189539eef37ad520bdd088a5df04d91484b4c"} Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.107687 4925 scope.go:117] "RemoveContainer" containerID="393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.107560 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bsdq4" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.149844 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf9z6\" (UniqueName: \"kubernetes.io/projected/f5445095-aa3e-4a89-878f-2fafbc94cf18-kube-api-access-bf9z6\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.149916 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.162265 4925 scope.go:117] "RemoveContainer" containerID="7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.205783 4925 scope.go:117] "RemoveContainer" containerID="38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.347181 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5445095-aa3e-4a89-878f-2fafbc94cf18" (UID: "f5445095-aa3e-4a89-878f-2fafbc94cf18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.347768 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5445095-aa3e-4a89-878f-2fafbc94cf18-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.359212 4925 scope.go:117] "RemoveContainer" containerID="393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36" Jan 21 11:54:52 crc kubenswrapper[4925]: E0121 11:54:52.359786 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36\": container with ID starting with 393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36 not found: ID does not exist" containerID="393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.359899 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36"} err="failed to get container status \"393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36\": rpc error: code = NotFound desc = could not find container \"393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36\": container with ID starting with 393ca3b945a8bb89e1c5b5d544353e2f3491c7d38f871c7111d82540c7312e36 not found: ID does not exist" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.360126 4925 scope.go:117] "RemoveContainer" containerID="7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71" Jan 21 11:54:52 crc kubenswrapper[4925]: E0121 11:54:52.360618 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71\": container with ID starting with 7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71 not found: ID does not exist" 
containerID="7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.360653 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71"} err="failed to get container status \"7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71\": rpc error: code = NotFound desc = could not find container \"7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71\": container with ID starting with 7f7a1bbcae6807629d7b5e2ba682420ec9e26f5c70b4fc5228047cb3be72ef71 not found: ID does not exist" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.360685 4925 scope.go:117] "RemoveContainer" containerID="38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff" Jan 21 11:54:52 crc kubenswrapper[4925]: E0121 11:54:52.361093 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff\": container with ID starting with 38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff not found: ID does not exist" containerID="38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.361147 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff"} err="failed to get container status \"38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff\": rpc error: code = NotFound desc = could not find container \"38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff\": container with ID starting with 38a4576c5e1654637f51edcb0c98e79d08ba2f018b827c1ecdf8ccd3f1e863ff not found: ID does not exist" Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.455123 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bsdq4"] Jan 21 11:54:52 crc kubenswrapper[4925]: I0121 11:54:52.461950 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bsdq4"] Jan 21 11:54:53 crc kubenswrapper[4925]: I0121 11:54:53.515259 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" path="/var/lib/kubelet/pods/f5445095-aa3e-4a89-878f-2fafbc94cf18/volumes" Jan 21 11:55:49 crc kubenswrapper[4925]: I0121 11:55:49.941091 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:55:49 crc kubenswrapper[4925]: I0121 11:55:49.941829 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.354039 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-grcqw"] Jan 21 11:56:13 crc kubenswrapper[4925]: E0121 11:56:13.354881 4925 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="55da4230-45df-4f91-b516-54393219e8b8" containerName="extract-utilities" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.354896 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="55da4230-45df-4f91-b516-54393219e8b8" containerName="extract-utilities" Jan 21 11:56:13 crc kubenswrapper[4925]: E0121 11:56:13.354915 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55da4230-45df-4f91-b516-54393219e8b8" containerName="registry-server" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.354922 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="55da4230-45df-4f91-b516-54393219e8b8" containerName="registry-server" Jan 21 11:56:13 crc kubenswrapper[4925]: E0121 11:56:13.354946 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="extract-content" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.354952 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="extract-content" Jan 21 11:56:13 crc kubenswrapper[4925]: E0121 11:56:13.354969 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55da4230-45df-4f91-b516-54393219e8b8" containerName="extract-content" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.354975 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="55da4230-45df-4f91-b516-54393219e8b8" containerName="extract-content" Jan 21 11:56:13 crc kubenswrapper[4925]: E0121 11:56:13.354993 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="registry-server" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.354999 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="registry-server" Jan 21 11:56:13 crc kubenswrapper[4925]: E0121 11:56:13.355008 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="extract-utilities" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.355013 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="extract-utilities" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.355191 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="55da4230-45df-4f91-b516-54393219e8b8" containerName="registry-server" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.355214 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5445095-aa3e-4a89-878f-2fafbc94cf18" containerName="registry-server" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.356641 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.361570 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdk2k\" (UniqueName: \"kubernetes.io/projected/02ae2728-31f2-4a4e-b794-71092068ffe1-kube-api-access-fdk2k\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.361666 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-catalog-content\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.361708 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-utilities\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.379027 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-grcqw"] Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.595776 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-catalog-content\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.595844 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-utilities\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.595899 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdk2k\" (UniqueName: \"kubernetes.io/projected/02ae2728-31f2-4a4e-b794-71092068ffe1-kube-api-access-fdk2k\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.596672 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-catalog-content\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.597246 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-utilities\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.660947 4925 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fdk2k\" (UniqueName: \"kubernetes.io/projected/02ae2728-31f2-4a4e-b794-71092068ffe1-kube-api-access-fdk2k\") pod \"redhat-marketplace-grcqw\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:13 crc kubenswrapper[4925]: I0121 11:56:13.721521 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:14 crc kubenswrapper[4925]: I0121 11:56:14.261454 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-grcqw"] Jan 21 11:56:15 crc kubenswrapper[4925]: I0121 11:56:15.125044 4925 generic.go:334] "Generic (PLEG): container finished" podID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerID="4ad60de69bb102cfbeb18b4abf9cb384d08e1e96a5739ad524628df8a17fe4b3" exitCode=0 Jan 21 11:56:15 crc kubenswrapper[4925]: I0121 11:56:15.125298 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grcqw" event={"ID":"02ae2728-31f2-4a4e-b794-71092068ffe1","Type":"ContainerDied","Data":"4ad60de69bb102cfbeb18b4abf9cb384d08e1e96a5739ad524628df8a17fe4b3"} Jan 21 11:56:15 crc kubenswrapper[4925]: I0121 11:56:15.125364 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grcqw" event={"ID":"02ae2728-31f2-4a4e-b794-71092068ffe1","Type":"ContainerStarted","Data":"0282f89147fd5ad1142f8b84f3a18b81b376b9f7f146b2d1acde32024c024db0"} Jan 21 11:56:19 crc kubenswrapper[4925]: I0121 11:56:19.940636 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:56:19 crc kubenswrapper[4925]: I0121 11:56:19.941464 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:56:25 crc kubenswrapper[4925]: E0121 11:56:25.244805 4925 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://registry.redhat.io/redhat/redhat-marketplace-index:v4.18: Requesting bearer token: invalid status code from registry 504 (Gateway Timeout)" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 21 11:56:25 crc kubenswrapper[4925]: E0121 11:56:25.245618 4925 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fdk2k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-grcqw_openshift-marketplace(02ae2728-31f2-4a4e-b794-71092068ffe1): ErrImagePull: initializing source docker://registry.redhat.io/redhat/redhat-marketplace-index:v4.18: Requesting bearer token: invalid status code from registry 504 (Gateway Timeout)" logger="UnhandledError" Jan 21 11:56:25 crc kubenswrapper[4925]: E0121 11:56:25.246851 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"initializing source docker://registry.redhat.io/redhat/redhat-marketplace-index:v4.18: Requesting bearer token: invalid status code from registry 504 (Gateway Timeout)\"" pod="openshift-marketplace/redhat-marketplace-grcqw" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" Jan 21 11:56:26 crc kubenswrapper[4925]: E0121 11:56:26.252749 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-grcqw" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" Jan 21 11:56:42 crc kubenswrapper[4925]: I0121 11:56:42.607126 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grcqw" event={"ID":"02ae2728-31f2-4a4e-b794-71092068ffe1","Type":"ContainerStarted","Data":"fccde033883ab52560bc6e7b1631febfbf041f45fc28c4b54834a04ba204eef3"} Jan 21 11:56:43 crc kubenswrapper[4925]: I0121 11:56:43.637213 4925 generic.go:334] "Generic (PLEG): container finished" podID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerID="fccde033883ab52560bc6e7b1631febfbf041f45fc28c4b54834a04ba204eef3" exitCode=0 Jan 21 11:56:43 crc kubenswrapper[4925]: I0121 11:56:43.637548 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grcqw" event={"ID":"02ae2728-31f2-4a4e-b794-71092068ffe1","Type":"ContainerDied","Data":"fccde033883ab52560bc6e7b1631febfbf041f45fc28c4b54834a04ba204eef3"} Jan 21 11:56:44 crc kubenswrapper[4925]: I0121 11:56:44.649416 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grcqw" 
event={"ID":"02ae2728-31f2-4a4e-b794-71092068ffe1","Type":"ContainerStarted","Data":"e6c6fb3704a68a4db1bb421e49c3c9a458ebd3c99abb4731a35407f7e5b91731"} Jan 21 11:56:49 crc kubenswrapper[4925]: I0121 11:56:49.940846 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:56:49 crc kubenswrapper[4925]: I0121 11:56:49.941790 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:56:49 crc kubenswrapper[4925]: I0121 11:56:49.941901 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 11:56:49 crc kubenswrapper[4925]: I0121 11:56:49.942848 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bb30684795a391256ad4ef320afe2396f497f78bec3c7653ede220c3029ce93b"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 11:56:49 crc kubenswrapper[4925]: I0121 11:56:49.942905 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://bb30684795a391256ad4ef320afe2396f497f78bec3c7653ede220c3029ce93b" gracePeriod=600 Jan 21 11:56:50 crc kubenswrapper[4925]: I0121 11:56:50.714554 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="bb30684795a391256ad4ef320afe2396f497f78bec3c7653ede220c3029ce93b" exitCode=0 Jan 21 11:56:50 crc kubenswrapper[4925]: I0121 11:56:50.714828 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"bb30684795a391256ad4ef320afe2396f497f78bec3c7653ede220c3029ce93b"} Jan 21 11:56:50 crc kubenswrapper[4925]: I0121 11:56:50.714984 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875"} Jan 21 11:56:50 crc kubenswrapper[4925]: I0121 11:56:50.715015 4925 scope.go:117] "RemoveContainer" containerID="4312b997f7248020dbcbaf6051420a3ffe7251907298451caf1b649cebc65dab" Jan 21 11:56:50 crc kubenswrapper[4925]: I0121 11:56:50.734925 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-grcqw" podStartSLOduration=8.791777837 podStartE2EDuration="37.734898376s" podCreationTimestamp="2026-01-21 11:56:13 +0000 UTC" firstStartedPulling="2026-01-21 11:56:15.127666469 +0000 UTC m=+3666.731558423" lastFinishedPulling="2026-01-21 11:56:44.070787028 +0000 UTC m=+3695.674678962" observedRunningTime="2026-01-21 
11:56:44.687171959 +0000 UTC m=+3696.291063893" watchObservedRunningTime="2026-01-21 11:56:50.734898376 +0000 UTC m=+3702.338790310" Jan 21 11:56:53 crc kubenswrapper[4925]: I0121 11:56:53.721786 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:53 crc kubenswrapper[4925]: I0121 11:56:53.722509 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:53 crc kubenswrapper[4925]: I0121 11:56:53.782367 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:53 crc kubenswrapper[4925]: I0121 11:56:53.840614 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:57 crc kubenswrapper[4925]: I0121 11:56:57.346599 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-grcqw"] Jan 21 11:56:57 crc kubenswrapper[4925]: I0121 11:56:57.347500 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-grcqw" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerName="registry-server" containerID="cri-o://e6c6fb3704a68a4db1bb421e49c3c9a458ebd3c99abb4731a35407f7e5b91731" gracePeriod=2 Jan 21 11:56:57 crc kubenswrapper[4925]: I0121 11:56:57.792977 4925 generic.go:334] "Generic (PLEG): container finished" podID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerID="e6c6fb3704a68a4db1bb421e49c3c9a458ebd3c99abb4731a35407f7e5b91731" exitCode=0 Jan 21 11:56:57 crc kubenswrapper[4925]: I0121 11:56:57.793342 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grcqw" event={"ID":"02ae2728-31f2-4a4e-b794-71092068ffe1","Type":"ContainerDied","Data":"e6c6fb3704a68a4db1bb421e49c3c9a458ebd3c99abb4731a35407f7e5b91731"} Jan 21 11:56:57 crc kubenswrapper[4925]: I0121 11:56:57.793377 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grcqw" event={"ID":"02ae2728-31f2-4a4e-b794-71092068ffe1","Type":"ContainerDied","Data":"0282f89147fd5ad1142f8b84f3a18b81b376b9f7f146b2d1acde32024c024db0"} Jan 21 11:56:57 crc kubenswrapper[4925]: I0121 11:56:57.793422 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0282f89147fd5ad1142f8b84f3a18b81b376b9f7f146b2d1acde32024c024db0" Jan 21 11:56:57 crc kubenswrapper[4925]: I0121 11:56:57.831271 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.167517 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-catalog-content\") pod \"02ae2728-31f2-4a4e-b794-71092068ffe1\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.167638 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdk2k\" (UniqueName: \"kubernetes.io/projected/02ae2728-31f2-4a4e-b794-71092068ffe1-kube-api-access-fdk2k\") pod \"02ae2728-31f2-4a4e-b794-71092068ffe1\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.167732 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-utilities\") pod \"02ae2728-31f2-4a4e-b794-71092068ffe1\" (UID: \"02ae2728-31f2-4a4e-b794-71092068ffe1\") " Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.175348 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-utilities" (OuterVolumeSpecName: "utilities") pod "02ae2728-31f2-4a4e-b794-71092068ffe1" (UID: "02ae2728-31f2-4a4e-b794-71092068ffe1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.184796 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02ae2728-31f2-4a4e-b794-71092068ffe1-kube-api-access-fdk2k" (OuterVolumeSpecName: "kube-api-access-fdk2k") pod "02ae2728-31f2-4a4e-b794-71092068ffe1" (UID: "02ae2728-31f2-4a4e-b794-71092068ffe1"). InnerVolumeSpecName "kube-api-access-fdk2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.239350 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "02ae2728-31f2-4a4e-b794-71092068ffe1" (UID: "02ae2728-31f2-4a4e-b794-71092068ffe1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.269913 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.270271 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02ae2728-31f2-4a4e-b794-71092068ffe1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.270441 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdk2k\" (UniqueName: \"kubernetes.io/projected/02ae2728-31f2-4a4e-b794-71092068ffe1-kube-api-access-fdk2k\") on node \"crc\" DevicePath \"\"" Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.802948 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-grcqw" Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.838318 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-grcqw"] Jan 21 11:56:58 crc kubenswrapper[4925]: I0121 11:56:58.844948 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-grcqw"] Jan 21 11:56:59 crc kubenswrapper[4925]: I0121 11:56:59.513630 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" path="/var/lib/kubelet/pods/02ae2728-31f2-4a4e-b794-71092068ffe1/volumes" Jan 21 11:59:19 crc kubenswrapper[4925]: I0121 11:59:19.941594 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:59:19 crc kubenswrapper[4925]: I0121 11:59:19.942243 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 11:59:49 crc kubenswrapper[4925]: I0121 11:59:49.941014 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 11:59:49 crc kubenswrapper[4925]: I0121 11:59:49.941766 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.202701 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2"] Jan 21 12:00:00 crc kubenswrapper[4925]: E0121 12:00:00.206813 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerName="registry-server" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.206857 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerName="registry-server" Jan 21 12:00:00 crc kubenswrapper[4925]: E0121 12:00:00.206930 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerName="extract-content" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.206940 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerName="extract-content" Jan 21 12:00:00 crc kubenswrapper[4925]: E0121 12:00:00.206991 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerName="extract-utilities" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.207010 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerName="extract-utilities" 
Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.207595 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="02ae2728-31f2-4a4e-b794-71092068ffe1" containerName="registry-server" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.209989 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.223466 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.224664 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.229525 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4jrv\" (UniqueName: \"kubernetes.io/projected/b7b5dd29-371b-47b9-8b77-a0395637b4d9-kube-api-access-m4jrv\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.230127 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7b5dd29-371b-47b9-8b77-a0395637b4d9-config-volume\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.230337 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7b5dd29-371b-47b9-8b77-a0395637b4d9-secret-volume\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.236786 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2"] Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.332366 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7b5dd29-371b-47b9-8b77-a0395637b4d9-secret-volume\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.332482 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4jrv\" (UniqueName: \"kubernetes.io/projected/b7b5dd29-371b-47b9-8b77-a0395637b4d9-kube-api-access-m4jrv\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.332540 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7b5dd29-371b-47b9-8b77-a0395637b4d9-config-volume\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.333756 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7b5dd29-371b-47b9-8b77-a0395637b4d9-config-volume\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.352849 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4jrv\" (UniqueName: \"kubernetes.io/projected/b7b5dd29-371b-47b9-8b77-a0395637b4d9-kube-api-access-m4jrv\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.378833 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7b5dd29-371b-47b9-8b77-a0395637b4d9-secret-volume\") pod \"collect-profiles-29483280-2nvt2\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:00 crc kubenswrapper[4925]: I0121 12:00:00.551810 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:01 crc kubenswrapper[4925]: I0121 12:00:01.301460 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2"] Jan 21 12:00:02 crc kubenswrapper[4925]: I0121 12:00:02.329800 4925 generic.go:334] "Generic (PLEG): container finished" podID="b7b5dd29-371b-47b9-8b77-a0395637b4d9" containerID="359fef676524854182de00b3e0724d7bbd90dcd896b73b498fab4b9151ba270c" exitCode=0 Jan 21 12:00:02 crc kubenswrapper[4925]: I0121 12:00:02.330086 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" event={"ID":"b7b5dd29-371b-47b9-8b77-a0395637b4d9","Type":"ContainerDied","Data":"359fef676524854182de00b3e0724d7bbd90dcd896b73b498fab4b9151ba270c"} Jan 21 12:00:02 crc kubenswrapper[4925]: I0121 12:00:02.330119 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" event={"ID":"b7b5dd29-371b-47b9-8b77-a0395637b4d9","Type":"ContainerStarted","Data":"56aceeb7ada9c96f9bd124f3960e48f619711810eaaf0fccc41d2192beaec30a"} Jan 21 12:00:03 crc kubenswrapper[4925]: I0121 12:00:03.701564 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:03 crc kubenswrapper[4925]: I0121 12:00:03.898133 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7b5dd29-371b-47b9-8b77-a0395637b4d9-config-volume\") pod \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " Jan 21 12:00:03 crc kubenswrapper[4925]: I0121 12:00:03.898284 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4jrv\" (UniqueName: \"kubernetes.io/projected/b7b5dd29-371b-47b9-8b77-a0395637b4d9-kube-api-access-m4jrv\") pod \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " Jan 21 12:00:03 crc kubenswrapper[4925]: I0121 12:00:03.898370 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7b5dd29-371b-47b9-8b77-a0395637b4d9-secret-volume\") pod \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\" (UID: \"b7b5dd29-371b-47b9-8b77-a0395637b4d9\") " Jan 21 12:00:03 crc kubenswrapper[4925]: I0121 12:00:03.899109 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7b5dd29-371b-47b9-8b77-a0395637b4d9-config-volume" (OuterVolumeSpecName: "config-volume") pod "b7b5dd29-371b-47b9-8b77-a0395637b4d9" (UID: "b7b5dd29-371b-47b9-8b77-a0395637b4d9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 12:00:03 crc kubenswrapper[4925]: I0121 12:00:03.909795 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7b5dd29-371b-47b9-8b77-a0395637b4d9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b7b5dd29-371b-47b9-8b77-a0395637b4d9" (UID: "b7b5dd29-371b-47b9-8b77-a0395637b4d9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 12:00:03 crc kubenswrapper[4925]: I0121 12:00:03.909944 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7b5dd29-371b-47b9-8b77-a0395637b4d9-kube-api-access-m4jrv" (OuterVolumeSpecName: "kube-api-access-m4jrv") pod "b7b5dd29-371b-47b9-8b77-a0395637b4d9" (UID: "b7b5dd29-371b-47b9-8b77-a0395637b4d9"). InnerVolumeSpecName "kube-api-access-m4jrv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 12:00:04 crc kubenswrapper[4925]: I0121 12:00:04.000789 4925 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b7b5dd29-371b-47b9-8b77-a0395637b4d9-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 12:00:04 crc kubenswrapper[4925]: I0121 12:00:04.000829 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4jrv\" (UniqueName: \"kubernetes.io/projected/b7b5dd29-371b-47b9-8b77-a0395637b4d9-kube-api-access-m4jrv\") on node \"crc\" DevicePath \"\"" Jan 21 12:00:04 crc kubenswrapper[4925]: I0121 12:00:04.000841 4925 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b7b5dd29-371b-47b9-8b77-a0395637b4d9-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 12:00:04 crc kubenswrapper[4925]: I0121 12:00:04.360610 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" event={"ID":"b7b5dd29-371b-47b9-8b77-a0395637b4d9","Type":"ContainerDied","Data":"56aceeb7ada9c96f9bd124f3960e48f619711810eaaf0fccc41d2192beaec30a"} Jan 21 12:00:04 crc kubenswrapper[4925]: I0121 12:00:04.360696 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56aceeb7ada9c96f9bd124f3960e48f619711810eaaf0fccc41d2192beaec30a" Jan 21 12:00:04 crc kubenswrapper[4925]: I0121 12:00:04.360792 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483280-2nvt2" Jan 21 12:00:04 crc kubenswrapper[4925]: I0121 12:00:04.795507 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk"] Jan 21 12:00:04 crc kubenswrapper[4925]: I0121 12:00:04.806418 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483235-qndhk"] Jan 21 12:00:05 crc kubenswrapper[4925]: I0121 12:00:05.512106 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6b514e2-7001-49cd-8e45-7a3333c7d25a" path="/var/lib/kubelet/pods/f6b514e2-7001-49cd-8e45-7a3333c7d25a/volumes" Jan 21 12:00:19 crc kubenswrapper[4925]: I0121 12:00:19.940750 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 12:00:19 crc kubenswrapper[4925]: I0121 12:00:19.941755 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 12:00:19 crc kubenswrapper[4925]: I0121 12:00:19.942032 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 12:00:19 crc kubenswrapper[4925]: I0121 12:00:19.943314 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875"} 
pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 12:00:19 crc kubenswrapper[4925]: I0121 12:00:19.943518 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" gracePeriod=600 Jan 21 12:00:20 crc kubenswrapper[4925]: E0121 12:00:20.085550 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:00:20 crc kubenswrapper[4925]: I0121 12:00:20.630130 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" exitCode=0 Jan 21 12:00:20 crc kubenswrapper[4925]: I0121 12:00:20.630177 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875"} Jan 21 12:00:20 crc kubenswrapper[4925]: I0121 12:00:20.630247 4925 scope.go:117] "RemoveContainer" containerID="bb30684795a391256ad4ef320afe2396f497f78bec3c7653ede220c3029ce93b" Jan 21 12:00:20 crc kubenswrapper[4925]: I0121 12:00:20.631179 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:00:20 crc kubenswrapper[4925]: E0121 12:00:20.631574 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:00:34 crc kubenswrapper[4925]: I0121 12:00:34.290999 4925 scope.go:117] "RemoveContainer" containerID="bc98af7d83ac652093940b525ffa759385bec47c7a3952b84cc66c8fd5f9577f" Jan 21 12:00:35 crc kubenswrapper[4925]: I0121 12:00:35.503929 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:00:35 crc kubenswrapper[4925]: E0121 12:00:35.504164 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:00:46 crc kubenswrapper[4925]: I0121 12:00:46.572888 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:00:46 crc 
kubenswrapper[4925]: E0121 12:00:46.573772 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.159487 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["watcher-kuttl-default/keystone-cron-29483281-2xgjf"] Jan 21 12:01:00 crc kubenswrapper[4925]: E0121 12:01:00.160301 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7b5dd29-371b-47b9-8b77-a0395637b4d9" containerName="collect-profiles" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.160316 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7b5dd29-371b-47b9-8b77-a0395637b4d9" containerName="collect-profiles" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.160568 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7b5dd29-371b-47b9-8b77-a0395637b4d9" containerName="collect-profiles" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.161266 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.173657 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-cron-29483281-2xgjf"] Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.213023 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-fernet-keys\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.213135 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-combined-ca-bundle\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.213194 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-cert-memcached-mtls\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.213230 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-config-data\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.213589 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbvvk\" (UniqueName: 
\"kubernetes.io/projected/a579ba5c-cb85-4070-a2ca-f2061d14e311-kube-api-access-lbvvk\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.315916 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbvvk\" (UniqueName: \"kubernetes.io/projected/a579ba5c-cb85-4070-a2ca-f2061d14e311-kube-api-access-lbvvk\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.316011 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-fernet-keys\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.316087 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-combined-ca-bundle\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.316137 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-cert-memcached-mtls\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.316195 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-config-data\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.323155 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-fernet-keys\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.323769 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-cert-memcached-mtls\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.324394 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-config-data\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.328922 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-combined-ca-bundle\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.334355 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbvvk\" (UniqueName: \"kubernetes.io/projected/a579ba5c-cb85-4070-a2ca-f2061d14e311-kube-api-access-lbvvk\") pod \"keystone-cron-29483281-2xgjf\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:00 crc kubenswrapper[4925]: I0121 12:01:00.486562 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:01 crc kubenswrapper[4925]: I0121 12:01:01.014563 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["watcher-kuttl-default/keystone-cron-29483281-2xgjf"] Jan 21 12:01:01 crc kubenswrapper[4925]: I0121 12:01:01.508196 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:01:01 crc kubenswrapper[4925]: E0121 12:01:01.508836 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:01:01 crc kubenswrapper[4925]: I0121 12:01:01.529690 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" event={"ID":"a579ba5c-cb85-4070-a2ca-f2061d14e311","Type":"ContainerStarted","Data":"bad99b2f4a9047ccc1b1d30e34ce292672286e6c0bf9f1582820a218b3a31f82"} Jan 21 12:01:01 crc kubenswrapper[4925]: I0121 12:01:01.529785 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" event={"ID":"a579ba5c-cb85-4070-a2ca-f2061d14e311","Type":"ContainerStarted","Data":"8a0d54bc213fd0391aa368f15e4c6ca41a51bfe63be415d8ef03a4d4eba3bf75"} Jan 21 12:01:05 crc kubenswrapper[4925]: I0121 12:01:05.569492 4925 generic.go:334] "Generic (PLEG): container finished" podID="a579ba5c-cb85-4070-a2ca-f2061d14e311" containerID="bad99b2f4a9047ccc1b1d30e34ce292672286e6c0bf9f1582820a218b3a31f82" exitCode=0 Jan 21 12:01:05 crc kubenswrapper[4925]: I0121 12:01:05.569575 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" event={"ID":"a579ba5c-cb85-4070-a2ca-f2061d14e311","Type":"ContainerDied","Data":"bad99b2f4a9047ccc1b1d30e34ce292672286e6c0bf9f1582820a218b3a31f82"} Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.082031 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.122222 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-cert-memcached-mtls\") pod \"a579ba5c-cb85-4070-a2ca-f2061d14e311\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.122357 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-combined-ca-bundle\") pod \"a579ba5c-cb85-4070-a2ca-f2061d14e311\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.122432 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-config-data\") pod \"a579ba5c-cb85-4070-a2ca-f2061d14e311\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.122463 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-fernet-keys\") pod \"a579ba5c-cb85-4070-a2ca-f2061d14e311\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.122560 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbvvk\" (UniqueName: \"kubernetes.io/projected/a579ba5c-cb85-4070-a2ca-f2061d14e311-kube-api-access-lbvvk\") pod \"a579ba5c-cb85-4070-a2ca-f2061d14e311\" (UID: \"a579ba5c-cb85-4070-a2ca-f2061d14e311\") " Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.133809 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a579ba5c-cb85-4070-a2ca-f2061d14e311" (UID: "a579ba5c-cb85-4070-a2ca-f2061d14e311"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.136051 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a579ba5c-cb85-4070-a2ca-f2061d14e311-kube-api-access-lbvvk" (OuterVolumeSpecName: "kube-api-access-lbvvk") pod "a579ba5c-cb85-4070-a2ca-f2061d14e311" (UID: "a579ba5c-cb85-4070-a2ca-f2061d14e311"). InnerVolumeSpecName "kube-api-access-lbvvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.149320 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a579ba5c-cb85-4070-a2ca-f2061d14e311" (UID: "a579ba5c-cb85-4070-a2ca-f2061d14e311"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.168930 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-config-data" (OuterVolumeSpecName: "config-data") pod "a579ba5c-cb85-4070-a2ca-f2061d14e311" (UID: "a579ba5c-cb85-4070-a2ca-f2061d14e311"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.214516 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-cert-memcached-mtls" (OuterVolumeSpecName: "cert-memcached-mtls") pod "a579ba5c-cb85-4070-a2ca-f2061d14e311" (UID: "a579ba5c-cb85-4070-a2ca-f2061d14e311"). InnerVolumeSpecName "cert-memcached-mtls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.226322 4925 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.226378 4925 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.226413 4925 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.226429 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbvvk\" (UniqueName: \"kubernetes.io/projected/a579ba5c-cb85-4070-a2ca-f2061d14e311-kube-api-access-lbvvk\") on node \"crc\" DevicePath \"\"" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.226446 4925 reconciler_common.go:293] "Volume detached for volume \"cert-memcached-mtls\" (UniqueName: \"kubernetes.io/secret/a579ba5c-cb85-4070-a2ca-f2061d14e311-cert-memcached-mtls\") on node \"crc\" DevicePath \"\"" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.589721 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" event={"ID":"a579ba5c-cb85-4070-a2ca-f2061d14e311","Type":"ContainerDied","Data":"8a0d54bc213fd0391aa368f15e4c6ca41a51bfe63be415d8ef03a4d4eba3bf75"} Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.589781 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a0d54bc213fd0391aa368f15e4c6ca41a51bfe63be415d8ef03a4d4eba3bf75" Jan 21 12:01:07 crc kubenswrapper[4925]: I0121 12:01:07.589794 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="watcher-kuttl-default/keystone-cron-29483281-2xgjf" Jan 21 12:01:14 crc kubenswrapper[4925]: I0121 12:01:14.581618 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:01:14 crc kubenswrapper[4925]: E0121 12:01:14.584807 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:01:25 crc kubenswrapper[4925]: I0121 12:01:25.502515 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:01:25 crc kubenswrapper[4925]: E0121 12:01:25.503350 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:01:39 crc kubenswrapper[4925]: I0121 12:01:39.508995 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:01:39 crc kubenswrapper[4925]: E0121 12:01:39.510243 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:01:54 crc kubenswrapper[4925]: I0121 12:01:54.501875 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:01:54 crc kubenswrapper[4925]: E0121 12:01:54.502632 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:02:06 crc kubenswrapper[4925]: I0121 12:02:06.502015 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:02:06 crc kubenswrapper[4925]: E0121 12:02:06.502770 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:02:21 crc kubenswrapper[4925]: I0121 12:02:21.507585 4925 scope.go:117] "RemoveContainer" 
containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:02:21 crc kubenswrapper[4925]: E0121 12:02:21.509734 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:02:33 crc kubenswrapper[4925]: I0121 12:02:33.502803 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:02:33 crc kubenswrapper[4925]: E0121 12:02:33.503627 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:02:34 crc kubenswrapper[4925]: I0121 12:02:34.546880 4925 scope.go:117] "RemoveContainer" containerID="4ad60de69bb102cfbeb18b4abf9cb384d08e1e96a5739ad524628df8a17fe4b3" Jan 21 12:02:47 crc kubenswrapper[4925]: I0121 12:02:47.502375 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:02:47 crc kubenswrapper[4925]: E0121 12:02:47.503331 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:02:58 crc kubenswrapper[4925]: I0121 12:02:58.501238 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:02:58 crc kubenswrapper[4925]: E0121 12:02:58.501951 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:03:09 crc kubenswrapper[4925]: I0121 12:03:09.540101 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:03:09 crc kubenswrapper[4925]: E0121 12:03:09.541008 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:03:22 crc kubenswrapper[4925]: I0121 12:03:22.665609 4925 scope.go:117] "RemoveContainer" 
containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:03:22 crc kubenswrapper[4925]: E0121 12:03:22.666788 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:03:34 crc kubenswrapper[4925]: I0121 12:03:34.501311 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:03:34 crc kubenswrapper[4925]: E0121 12:03:34.502061 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:03:34 crc kubenswrapper[4925]: I0121 12:03:34.600117 4925 scope.go:117] "RemoveContainer" containerID="e6c6fb3704a68a4db1bb421e49c3c9a458ebd3c99abb4731a35407f7e5b91731" Jan 21 12:03:34 crc kubenswrapper[4925]: I0121 12:03:34.626812 4925 scope.go:117] "RemoveContainer" containerID="fccde033883ab52560bc6e7b1631febfbf041f45fc28c4b54834a04ba204eef3" Jan 21 12:03:48 crc kubenswrapper[4925]: I0121 12:03:48.501700 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:03:48 crc kubenswrapper[4925]: E0121 12:03:48.504034 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:04:02 crc kubenswrapper[4925]: I0121 12:04:02.502761 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:04:02 crc kubenswrapper[4925]: E0121 12:04:02.503912 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:04:15 crc kubenswrapper[4925]: I0121 12:04:15.502872 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:04:15 crc kubenswrapper[4925]: E0121 12:04:15.503656 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:04:27 crc kubenswrapper[4925]: I0121 12:04:27.502428 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:04:27 crc kubenswrapper[4925]: E0121 12:04:27.503211 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.165042 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xm77b"] Jan 21 12:04:37 crc kubenswrapper[4925]: E0121 12:04:37.168639 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a579ba5c-cb85-4070-a2ca-f2061d14e311" containerName="keystone-cron" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.168775 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="a579ba5c-cb85-4070-a2ca-f2061d14e311" containerName="keystone-cron" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.169048 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="a579ba5c-cb85-4070-a2ca-f2061d14e311" containerName="keystone-cron" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.170778 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.186579 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xm77b"] Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.327008 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-catalog-content\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.327618 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-utilities\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.327828 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdrjh\" (UniqueName: \"kubernetes.io/projected/3abe4c19-e9c9-4809-b635-4b89de965d82-kube-api-access-gdrjh\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.429214 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-utilities\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 
21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.429330 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdrjh\" (UniqueName: \"kubernetes.io/projected/3abe4c19-e9c9-4809-b635-4b89de965d82-kube-api-access-gdrjh\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.429413 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-catalog-content\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.430352 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-utilities\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.430448 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-catalog-content\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.461531 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdrjh\" (UniqueName: \"kubernetes.io/projected/3abe4c19-e9c9-4809-b635-4b89de965d82-kube-api-access-gdrjh\") pod \"certified-operators-xm77b\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:37 crc kubenswrapper[4925]: I0121 12:04:37.507745 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:38 crc kubenswrapper[4925]: I0121 12:04:38.355149 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xm77b"] Jan 21 12:04:38 crc kubenswrapper[4925]: I0121 12:04:38.950618 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xm77b" event={"ID":"3abe4c19-e9c9-4809-b635-4b89de965d82","Type":"ContainerStarted","Data":"d10c6b3d4276c3ddf850676cd5785cfc527167ded47f9f7cf4b3769b59f3af4b"} Jan 21 12:04:39 crc kubenswrapper[4925]: I0121 12:04:39.962135 4925 generic.go:334] "Generic (PLEG): container finished" podID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerID="ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757" exitCode=0 Jan 21 12:04:39 crc kubenswrapper[4925]: I0121 12:04:39.962458 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xm77b" event={"ID":"3abe4c19-e9c9-4809-b635-4b89de965d82","Type":"ContainerDied","Data":"ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757"} Jan 21 12:04:39 crc kubenswrapper[4925]: I0121 12:04:39.965551 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 12:04:41 crc kubenswrapper[4925]: I0121 12:04:41.526755 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:04:41 crc kubenswrapper[4925]: E0121 12:04:41.527285 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:04:41 crc kubenswrapper[4925]: I0121 12:04:41.989551 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xm77b" event={"ID":"3abe4c19-e9c9-4809-b635-4b89de965d82","Type":"ContainerStarted","Data":"da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c"} Jan 21 12:04:43 crc kubenswrapper[4925]: I0121 12:04:43.085911 4925 generic.go:334] "Generic (PLEG): container finished" podID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerID="da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c" exitCode=0 Jan 21 12:04:43 crc kubenswrapper[4925]: I0121 12:04:43.085969 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xm77b" event={"ID":"3abe4c19-e9c9-4809-b635-4b89de965d82","Type":"ContainerDied","Data":"da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c"} Jan 21 12:04:45 crc kubenswrapper[4925]: I0121 12:04:45.107617 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xm77b" event={"ID":"3abe4c19-e9c9-4809-b635-4b89de965d82","Type":"ContainerStarted","Data":"b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68"} Jan 21 12:04:46 crc kubenswrapper[4925]: I0121 12:04:46.135779 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xm77b" podStartSLOduration=5.46988274 podStartE2EDuration="9.135754734s" podCreationTimestamp="2026-01-21 12:04:37 +0000 UTC" firstStartedPulling="2026-01-21 
12:04:39.965159719 +0000 UTC m=+4171.569051653" lastFinishedPulling="2026-01-21 12:04:43.631031703 +0000 UTC m=+4175.234923647" observedRunningTime="2026-01-21 12:04:46.130709125 +0000 UTC m=+4177.734601059" watchObservedRunningTime="2026-01-21 12:04:46.135754734 +0000 UTC m=+4177.739646668" Jan 21 12:04:47 crc kubenswrapper[4925]: I0121 12:04:47.512782 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:47 crc kubenswrapper[4925]: I0121 12:04:47.512832 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:47 crc kubenswrapper[4925]: I0121 12:04:47.564696 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:55 crc kubenswrapper[4925]: I0121 12:04:55.547979 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:04:55 crc kubenswrapper[4925]: E0121 12:04:55.549142 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:04:57 crc kubenswrapper[4925]: I0121 12:04:57.558038 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:04:59 crc kubenswrapper[4925]: I0121 12:04:59.756143 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j5v5d"] Jan 21 12:04:59 crc kubenswrapper[4925]: I0121 12:04:59.758409 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:04:59 crc kubenswrapper[4925]: I0121 12:04:59.776806 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j5v5d"] Jan 21 12:04:59 crc kubenswrapper[4925]: I0121 12:04:59.798903 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk8cc\" (UniqueName: \"kubernetes.io/projected/36f885fb-926b-42ec-adce-1e6b1caf7b3e-kube-api-access-hk8cc\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:04:59 crc kubenswrapper[4925]: I0121 12:04:59.799130 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-utilities\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:04:59 crc kubenswrapper[4925]: I0121 12:04:59.799207 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-catalog-content\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:00 crc kubenswrapper[4925]: I0121 12:05:00.000306 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk8cc\" (UniqueName: \"kubernetes.io/projected/36f885fb-926b-42ec-adce-1e6b1caf7b3e-kube-api-access-hk8cc\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:00 crc kubenswrapper[4925]: I0121 12:05:00.000453 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-utilities\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:00 crc kubenswrapper[4925]: I0121 12:05:00.000484 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-catalog-content\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:00 crc kubenswrapper[4925]: I0121 12:05:00.000897 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-utilities\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:00 crc kubenswrapper[4925]: I0121 12:05:00.001213 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-catalog-content\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:00 crc kubenswrapper[4925]: I0121 12:05:00.021777 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hk8cc\" (UniqueName: \"kubernetes.io/projected/36f885fb-926b-42ec-adce-1e6b1caf7b3e-kube-api-access-hk8cc\") pod \"redhat-operators-j5v5d\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:00 crc kubenswrapper[4925]: I0121 12:05:00.083169 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:00 crc kubenswrapper[4925]: I0121 12:05:00.670974 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j5v5d"] Jan 21 12:05:01 crc kubenswrapper[4925]: I0121 12:05:01.276079 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5v5d" event={"ID":"36f885fb-926b-42ec-adce-1e6b1caf7b3e","Type":"ContainerStarted","Data":"a7227906f155d83ee4dbaaf849a7ae1be648d5705a6bd3b20484e552d35976cc"} Jan 21 12:05:02 crc kubenswrapper[4925]: I0121 12:05:02.288567 4925 generic.go:334] "Generic (PLEG): container finished" podID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerID="3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99" exitCode=0 Jan 21 12:05:02 crc kubenswrapper[4925]: I0121 12:05:02.288628 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5v5d" event={"ID":"36f885fb-926b-42ec-adce-1e6b1caf7b3e","Type":"ContainerDied","Data":"3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99"} Jan 21 12:05:02 crc kubenswrapper[4925]: I0121 12:05:02.343071 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xm77b"] Jan 21 12:05:02 crc kubenswrapper[4925]: I0121 12:05:02.343551 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xm77b" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerName="registry-server" containerID="cri-o://b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68" gracePeriod=2 Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.024991 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.033995 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdrjh\" (UniqueName: \"kubernetes.io/projected/3abe4c19-e9c9-4809-b635-4b89de965d82-kube-api-access-gdrjh\") pod \"3abe4c19-e9c9-4809-b635-4b89de965d82\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.034408 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-catalog-content\") pod \"3abe4c19-e9c9-4809-b635-4b89de965d82\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.034562 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-utilities\") pod \"3abe4c19-e9c9-4809-b635-4b89de965d82\" (UID: \"3abe4c19-e9c9-4809-b635-4b89de965d82\") " Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.036261 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-utilities" (OuterVolumeSpecName: "utilities") pod "3abe4c19-e9c9-4809-b635-4b89de965d82" (UID: "3abe4c19-e9c9-4809-b635-4b89de965d82"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.044093 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3abe4c19-e9c9-4809-b635-4b89de965d82-kube-api-access-gdrjh" (OuterVolumeSpecName: "kube-api-access-gdrjh") pod "3abe4c19-e9c9-4809-b635-4b89de965d82" (UID: "3abe4c19-e9c9-4809-b635-4b89de965d82"). InnerVolumeSpecName "kube-api-access-gdrjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.104672 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3abe4c19-e9c9-4809-b635-4b89de965d82" (UID: "3abe4c19-e9c9-4809-b635-4b89de965d82"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.137324 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.137366 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abe4c19-e9c9-4809-b635-4b89de965d82-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.137379 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdrjh\" (UniqueName: \"kubernetes.io/projected/3abe4c19-e9c9-4809-b635-4b89de965d82-kube-api-access-gdrjh\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.309956 4925 generic.go:334] "Generic (PLEG): container finished" podID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerID="b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68" exitCode=0 Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.310012 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xm77b" event={"ID":"3abe4c19-e9c9-4809-b635-4b89de965d82","Type":"ContainerDied","Data":"b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68"} Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.310038 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xm77b" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.310048 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xm77b" event={"ID":"3abe4c19-e9c9-4809-b635-4b89de965d82","Type":"ContainerDied","Data":"d10c6b3d4276c3ddf850676cd5785cfc527167ded47f9f7cf4b3769b59f3af4b"} Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.310088 4925 scope.go:117] "RemoveContainer" containerID="b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.361156 4925 scope.go:117] "RemoveContainer" containerID="da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.372151 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xm77b"] Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.382823 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xm77b"] Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.393088 4925 scope.go:117] "RemoveContainer" containerID="ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.416193 4925 scope.go:117] "RemoveContainer" containerID="b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68" Jan 21 12:05:03 crc kubenswrapper[4925]: E0121 12:05:03.417219 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68\": container with ID starting with b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68 not found: ID does not exist" containerID="b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.417267 
4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68"} err="failed to get container status \"b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68\": rpc error: code = NotFound desc = could not find container \"b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68\": container with ID starting with b850084d297b1eabd74d9cefe325da11d151ff7c4fcb6c70b0f7ff7314aa1d68 not found: ID does not exist" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.417301 4925 scope.go:117] "RemoveContainer" containerID="da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c" Jan 21 12:05:03 crc kubenswrapper[4925]: E0121 12:05:03.417936 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c\": container with ID starting with da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c not found: ID does not exist" containerID="da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.417988 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c"} err="failed to get container status \"da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c\": rpc error: code = NotFound desc = could not find container \"da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c\": container with ID starting with da2e7bd99317cc30318e4b4c694b4ce09ba5ca47726533825f018a13d6194a0c not found: ID does not exist" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.418030 4925 scope.go:117] "RemoveContainer" containerID="ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757" Jan 21 12:05:03 crc kubenswrapper[4925]: E0121 12:05:03.419291 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757\": container with ID starting with ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757 not found: ID does not exist" containerID="ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.419346 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757"} err="failed to get container status \"ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757\": rpc error: code = NotFound desc = could not find container \"ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757\": container with ID starting with ac1cf99e9fd6b40dd8f34519b8fb6ab1b8566c58ab197b8101eac3f471cad757 not found: ID does not exist" Jan 21 12:05:03 crc kubenswrapper[4925]: I0121 12:05:03.617444 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" path="/var/lib/kubelet/pods/3abe4c19-e9c9-4809-b635-4b89de965d82/volumes" Jan 21 12:05:04 crc kubenswrapper[4925]: I0121 12:05:04.326897 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5v5d" 
event={"ID":"36f885fb-926b-42ec-adce-1e6b1caf7b3e","Type":"ContainerStarted","Data":"d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970"} Jan 21 12:05:05 crc kubenswrapper[4925]: I0121 12:05:05.344004 4925 generic.go:334] "Generic (PLEG): container finished" podID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerID="d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970" exitCode=0 Jan 21 12:05:05 crc kubenswrapper[4925]: I0121 12:05:05.344067 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5v5d" event={"ID":"36f885fb-926b-42ec-adce-1e6b1caf7b3e","Type":"ContainerDied","Data":"d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970"} Jan 21 12:05:07 crc kubenswrapper[4925]: I0121 12:05:07.382106 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5v5d" event={"ID":"36f885fb-926b-42ec-adce-1e6b1caf7b3e","Type":"ContainerStarted","Data":"7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0"} Jan 21 12:05:07 crc kubenswrapper[4925]: I0121 12:05:07.428918 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j5v5d" podStartSLOduration=4.603884664 podStartE2EDuration="8.428846429s" podCreationTimestamp="2026-01-21 12:04:59 +0000 UTC" firstStartedPulling="2026-01-21 12:05:02.290312982 +0000 UTC m=+4193.894204916" lastFinishedPulling="2026-01-21 12:05:06.115274747 +0000 UTC m=+4197.719166681" observedRunningTime="2026-01-21 12:05:07.422096577 +0000 UTC m=+4199.025988531" watchObservedRunningTime="2026-01-21 12:05:07.428846429 +0000 UTC m=+4199.032738383" Jan 21 12:05:09 crc kubenswrapper[4925]: I0121 12:05:09.539688 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:05:09 crc kubenswrapper[4925]: E0121 12:05:09.540072 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:05:10 crc kubenswrapper[4925]: I0121 12:05:10.083984 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:10 crc kubenswrapper[4925]: I0121 12:05:10.085624 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:11 crc kubenswrapper[4925]: I0121 12:05:11.138441 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j5v5d" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="registry-server" probeResult="failure" output=< Jan 21 12:05:11 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 12:05:11 crc kubenswrapper[4925]: > Jan 21 12:05:20 crc kubenswrapper[4925]: I0121 12:05:20.167256 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:20 crc kubenswrapper[4925]: I0121 12:05:20.217464 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:23 crc 
kubenswrapper[4925]: I0121 12:05:23.502910 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875" Jan 21 12:05:23 crc kubenswrapper[4925]: I0121 12:05:23.930045 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j5v5d"] Jan 21 12:05:23 crc kubenswrapper[4925]: I0121 12:05:23.930601 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j5v5d" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="registry-server" containerID="cri-o://7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0" gracePeriod=2 Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.606310 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.633807 4925 generic.go:334] "Generic (PLEG): container finished" podID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerID="7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0" exitCode=0 Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.633849 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j5v5d" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.633866 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5v5d" event={"ID":"36f885fb-926b-42ec-adce-1e6b1caf7b3e","Type":"ContainerDied","Data":"7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0"} Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.633899 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5v5d" event={"ID":"36f885fb-926b-42ec-adce-1e6b1caf7b3e","Type":"ContainerDied","Data":"a7227906f155d83ee4dbaaf849a7ae1be648d5705a6bd3b20484e552d35976cc"} Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.633921 4925 scope.go:117] "RemoveContainer" containerID="7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.638334 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"c15563ed1b90cc9b43047b4b30bdf9933c139f2692efbcaa33312f88a94bed10"} Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.708578 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk8cc\" (UniqueName: \"kubernetes.io/projected/36f885fb-926b-42ec-adce-1e6b1caf7b3e-kube-api-access-hk8cc\") pod \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.708734 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-utilities\") pod \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\" (UID: \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.708883 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-catalog-content\") pod \"36f885fb-926b-42ec-adce-1e6b1caf7b3e\" (UID: 
\"36f885fb-926b-42ec-adce-1e6b1caf7b3e\") " Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.711395 4925 scope.go:117] "RemoveContainer" containerID="d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.712299 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-utilities" (OuterVolumeSpecName: "utilities") pod "36f885fb-926b-42ec-adce-1e6b1caf7b3e" (UID: "36f885fb-926b-42ec-adce-1e6b1caf7b3e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.732475 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36f885fb-926b-42ec-adce-1e6b1caf7b3e-kube-api-access-hk8cc" (OuterVolumeSpecName: "kube-api-access-hk8cc") pod "36f885fb-926b-42ec-adce-1e6b1caf7b3e" (UID: "36f885fb-926b-42ec-adce-1e6b1caf7b3e"). InnerVolumeSpecName "kube-api-access-hk8cc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.774578 4925 scope.go:117] "RemoveContainer" containerID="3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.812814 4925 scope.go:117] "RemoveContainer" containerID="7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0" Jan 21 12:05:25 crc kubenswrapper[4925]: E0121 12:05:25.814068 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0\": container with ID starting with 7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0 not found: ID does not exist" containerID="7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.814163 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0"} err="failed to get container status \"7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0\": rpc error: code = NotFound desc = could not find container \"7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0\": container with ID starting with 7a69156964345ee86e6ebbb58ffb5f10c6b7507842456b9bfea95e2e16882da0 not found: ID does not exist" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.814228 4925 scope.go:117] "RemoveContainer" containerID="d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970" Jan 21 12:05:25 crc kubenswrapper[4925]: E0121 12:05:25.814951 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970\": container with ID starting with d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970 not found: ID does not exist" containerID="d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.814983 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970"} err="failed to get container status \"d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970\": rpc error: code = NotFound desc = could not 
find container \"d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970\": container with ID starting with d2da5a626285d6dbd2b9e28c6b23b5c0b844e9054b599a5d5faf8ba25ff6c970 not found: ID does not exist" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.815003 4925 scope.go:117] "RemoveContainer" containerID="3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99" Jan 21 12:05:25 crc kubenswrapper[4925]: E0121 12:05:25.815284 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99\": container with ID starting with 3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99 not found: ID does not exist" containerID="3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.815317 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99"} err="failed to get container status \"3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99\": rpc error: code = NotFound desc = could not find container \"3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99\": container with ID starting with 3995f27605abc57520411d2e2e5b6c69b782f2017d923e6f17da136a599edc99 not found: ID does not exist" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.815682 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.815719 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk8cc\" (UniqueName: \"kubernetes.io/projected/36f885fb-926b-42ec-adce-1e6b1caf7b3e-kube-api-access-hk8cc\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.866885 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36f885fb-926b-42ec-adce-1e6b1caf7b3e" (UID: "36f885fb-926b-42ec-adce-1e6b1caf7b3e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.917437 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f885fb-926b-42ec-adce-1e6b1caf7b3e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.976940 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j5v5d"] Jan 21 12:05:25 crc kubenswrapper[4925]: I0121 12:05:25.982619 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j5v5d"] Jan 21 12:05:27 crc kubenswrapper[4925]: I0121 12:05:27.523166 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" path="/var/lib/kubelet/pods/36f885fb-926b-42ec-adce-1e6b1caf7b3e/volumes" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.361585 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6tz5n"] Jan 21 12:05:28 crc kubenswrapper[4925]: E0121 12:05:28.362448 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerName="registry-server" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.362472 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerName="registry-server" Jan 21 12:05:28 crc kubenswrapper[4925]: E0121 12:05:28.362494 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="registry-server" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.362503 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="registry-server" Jan 21 12:05:28 crc kubenswrapper[4925]: E0121 12:05:28.362524 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="extract-utilities" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.362534 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="extract-utilities" Jan 21 12:05:28 crc kubenswrapper[4925]: E0121 12:05:28.362547 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerName="extract-content" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.362555 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerName="extract-content" Jan 21 12:05:28 crc kubenswrapper[4925]: E0121 12:05:28.362573 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="extract-content" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.362581 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="extract-content" Jan 21 12:05:28 crc kubenswrapper[4925]: E0121 12:05:28.362591 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerName="extract-utilities" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.362599 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerName="extract-utilities" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.362823 4925 
memory_manager.go:354] "RemoveStaleState removing state" podUID="3abe4c19-e9c9-4809-b635-4b89de965d82" containerName="registry-server" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.362847 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="36f885fb-926b-42ec-adce-1e6b1caf7b3e" containerName="registry-server" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.364509 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.377911 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6tz5n"] Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.464168 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvmht\" (UniqueName: \"kubernetes.io/projected/db2fca8a-e7ab-48dd-994a-f8b2ed759212-kube-api-access-mvmht\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.464252 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-utilities\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.464367 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-catalog-content\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.566021 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvmht\" (UniqueName: \"kubernetes.io/projected/db2fca8a-e7ab-48dd-994a-f8b2ed759212-kube-api-access-mvmht\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.566118 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-utilities\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.566191 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-catalog-content\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.566803 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-catalog-content\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc 
kubenswrapper[4925]: I0121 12:05:28.567436 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-utilities\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.596384 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvmht\" (UniqueName: \"kubernetes.io/projected/db2fca8a-e7ab-48dd-994a-f8b2ed759212-kube-api-access-mvmht\") pod \"community-operators-6tz5n\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:28 crc kubenswrapper[4925]: I0121 12:05:28.687023 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:29 crc kubenswrapper[4925]: I0121 12:05:29.195083 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6tz5n"] Jan 21 12:05:29 crc kubenswrapper[4925]: I0121 12:05:29.717143 4925 generic.go:334] "Generic (PLEG): container finished" podID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerID="9e8bbe7c3e43649f8ac5ad195001b3c9af472bf98974760e13af419072d2ac83" exitCode=0 Jan 21 12:05:29 crc kubenswrapper[4925]: I0121 12:05:29.717324 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6tz5n" event={"ID":"db2fca8a-e7ab-48dd-994a-f8b2ed759212","Type":"ContainerDied","Data":"9e8bbe7c3e43649f8ac5ad195001b3c9af472bf98974760e13af419072d2ac83"} Jan 21 12:05:29 crc kubenswrapper[4925]: I0121 12:05:29.717634 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6tz5n" event={"ID":"db2fca8a-e7ab-48dd-994a-f8b2ed759212","Type":"ContainerStarted","Data":"41069d7a905de2805f7bd289b2f45ca5cf58ddf35a04a9d9dd681c508d6e53cd"} Jan 21 12:05:31 crc kubenswrapper[4925]: I0121 12:05:31.786241 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6tz5n" event={"ID":"db2fca8a-e7ab-48dd-994a-f8b2ed759212","Type":"ContainerStarted","Data":"7ad27df0eac20331008e6f1e672fa05806156d232c6e033ded244736a05ea6a0"} Jan 21 12:05:32 crc kubenswrapper[4925]: I0121 12:05:32.795788 4925 generic.go:334] "Generic (PLEG): container finished" podID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerID="7ad27df0eac20331008e6f1e672fa05806156d232c6e033ded244736a05ea6a0" exitCode=0 Jan 21 12:05:32 crc kubenswrapper[4925]: I0121 12:05:32.795911 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6tz5n" event={"ID":"db2fca8a-e7ab-48dd-994a-f8b2ed759212","Type":"ContainerDied","Data":"7ad27df0eac20331008e6f1e672fa05806156d232c6e033ded244736a05ea6a0"} Jan 21 12:05:36 crc kubenswrapper[4925]: I0121 12:05:36.837076 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6tz5n" event={"ID":"db2fca8a-e7ab-48dd-994a-f8b2ed759212","Type":"ContainerStarted","Data":"5af1b5ac6267ff3e3961e09dc64757125e19feb07386836640b416c1b119e109"} Jan 21 12:05:36 crc kubenswrapper[4925]: I0121 12:05:36.928130 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6tz5n" podStartSLOduration=2.241829776 podStartE2EDuration="8.928102363s" podCreationTimestamp="2026-01-21 12:05:28 +0000 
UTC" firstStartedPulling="2026-01-21 12:05:29.719090092 +0000 UTC m=+4221.322982026" lastFinishedPulling="2026-01-21 12:05:36.405362639 +0000 UTC m=+4228.009254613" observedRunningTime="2026-01-21 12:05:36.906964288 +0000 UTC m=+4228.510856222" watchObservedRunningTime="2026-01-21 12:05:36.928102363 +0000 UTC m=+4228.531994287" Jan 21 12:05:38 crc kubenswrapper[4925]: I0121 12:05:38.687497 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:38 crc kubenswrapper[4925]: I0121 12:05:38.687867 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:39 crc kubenswrapper[4925]: I0121 12:05:39.742161 4925 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-6tz5n" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="registry-server" probeResult="failure" output=< Jan 21 12:05:39 crc kubenswrapper[4925]: timeout: failed to connect service ":50051" within 1s Jan 21 12:05:39 crc kubenswrapper[4925]: > Jan 21 12:05:48 crc kubenswrapper[4925]: I0121 12:05:48.738684 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:48 crc kubenswrapper[4925]: I0121 12:05:48.792802 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:52 crc kubenswrapper[4925]: I0121 12:05:52.344524 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6tz5n"] Jan 21 12:05:52 crc kubenswrapper[4925]: I0121 12:05:52.345068 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6tz5n" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="registry-server" containerID="cri-o://5af1b5ac6267ff3e3961e09dc64757125e19feb07386836640b416c1b119e109" gracePeriod=2 Jan 21 12:05:54 crc kubenswrapper[4925]: I0121 12:05:54.989592 4925 generic.go:334] "Generic (PLEG): container finished" podID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerID="5af1b5ac6267ff3e3961e09dc64757125e19feb07386836640b416c1b119e109" exitCode=0 Jan 21 12:05:54 crc kubenswrapper[4925]: I0121 12:05:54.989640 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6tz5n" event={"ID":"db2fca8a-e7ab-48dd-994a-f8b2ed759212","Type":"ContainerDied","Data":"5af1b5ac6267ff3e3961e09dc64757125e19feb07386836640b416c1b119e109"} Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.641160 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.829508 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-catalog-content\") pod \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.830111 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-utilities\") pod \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.830261 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvmht\" (UniqueName: \"kubernetes.io/projected/db2fca8a-e7ab-48dd-994a-f8b2ed759212-kube-api-access-mvmht\") pod \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\" (UID: \"db2fca8a-e7ab-48dd-994a-f8b2ed759212\") " Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.831239 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-utilities" (OuterVolumeSpecName: "utilities") pod "db2fca8a-e7ab-48dd-994a-f8b2ed759212" (UID: "db2fca8a-e7ab-48dd-994a-f8b2ed759212"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.837006 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db2fca8a-e7ab-48dd-994a-f8b2ed759212-kube-api-access-mvmht" (OuterVolumeSpecName: "kube-api-access-mvmht") pod "db2fca8a-e7ab-48dd-994a-f8b2ed759212" (UID: "db2fca8a-e7ab-48dd-994a-f8b2ed759212"). InnerVolumeSpecName "kube-api-access-mvmht". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.933013 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.933070 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvmht\" (UniqueName: \"kubernetes.io/projected/db2fca8a-e7ab-48dd-994a-f8b2ed759212-kube-api-access-mvmht\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:55 crc kubenswrapper[4925]: I0121 12:05:55.941378 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "db2fca8a-e7ab-48dd-994a-f8b2ed759212" (UID: "db2fca8a-e7ab-48dd-994a-f8b2ed759212"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:05:56 crc kubenswrapper[4925]: I0121 12:05:56.005987 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6tz5n" event={"ID":"db2fca8a-e7ab-48dd-994a-f8b2ed759212","Type":"ContainerDied","Data":"41069d7a905de2805f7bd289b2f45ca5cf58ddf35a04a9d9dd681c508d6e53cd"} Jan 21 12:05:56 crc kubenswrapper[4925]: I0121 12:05:56.006086 4925 scope.go:117] "RemoveContainer" containerID="5af1b5ac6267ff3e3961e09dc64757125e19feb07386836640b416c1b119e109" Jan 21 12:05:56 crc kubenswrapper[4925]: I0121 12:05:56.006203 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6tz5n" Jan 21 12:05:56 crc kubenswrapper[4925]: I0121 12:05:56.038109 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db2fca8a-e7ab-48dd-994a-f8b2ed759212-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 12:05:56 crc kubenswrapper[4925]: I0121 12:05:56.048607 4925 scope.go:117] "RemoveContainer" containerID="7ad27df0eac20331008e6f1e672fa05806156d232c6e033ded244736a05ea6a0" Jan 21 12:05:56 crc kubenswrapper[4925]: I0121 12:05:56.058356 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6tz5n"] Jan 21 12:05:56 crc kubenswrapper[4925]: I0121 12:05:56.065082 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6tz5n"] Jan 21 12:05:56 crc kubenswrapper[4925]: I0121 12:05:56.076903 4925 scope.go:117] "RemoveContainer" containerID="9e8bbe7c3e43649f8ac5ad195001b3c9af472bf98974760e13af419072d2ac83" Jan 21 12:05:57 crc kubenswrapper[4925]: I0121 12:05:57.512671 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" path="/var/lib/kubelet/pods/db2fca8a-e7ab-48dd-994a-f8b2ed759212/volumes" Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.162904 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8lm88"] Jan 21 12:07:35 crc kubenswrapper[4925]: E0121 12:07:35.164062 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="extract-utilities" Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.164083 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="extract-utilities" Jan 21 12:07:35 crc kubenswrapper[4925]: E0121 12:07:35.164103 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="registry-server" Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.164112 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="registry-server" Jan 21 12:07:35 crc kubenswrapper[4925]: E0121 12:07:35.164146 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="extract-content" Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.164155 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="extract-content" Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.164356 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="db2fca8a-e7ab-48dd-994a-f8b2ed759212" containerName="registry-server" Jan 21 
12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.166280 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.187032 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lm88"]
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.239276 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-catalog-content\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.239384 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-utilities\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.239766 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcx6w\" (UniqueName: \"kubernetes.io/projected/9a801ac9-da75-4c2e-a16b-90f246211e18-kube-api-access-tcx6w\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.341985 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-catalog-content\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.342070 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-utilities\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.342188 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcx6w\" (UniqueName: \"kubernetes.io/projected/9a801ac9-da75-4c2e-a16b-90f246211e18-kube-api-access-tcx6w\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.343089 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-catalog-content\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.343196 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-utilities\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.369248 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcx6w\" (UniqueName: \"kubernetes.io/projected/9a801ac9-da75-4c2e-a16b-90f246211e18-kube-api-access-tcx6w\") pod \"redhat-marketplace-8lm88\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") " pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:35 crc kubenswrapper[4925]: I0121 12:07:35.492051 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:36 crc kubenswrapper[4925]: I0121 12:07:36.308047 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lm88"]
Jan 21 12:07:36 crc kubenswrapper[4925]: I0121 12:07:36.565536 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lm88" event={"ID":"9a801ac9-da75-4c2e-a16b-90f246211e18","Type":"ContainerStarted","Data":"2ff4e8976fb7d48c7de8196ac094a7c2bb36572de1f4375d2e4ec65f16d31dd6"}
Jan 21 12:07:37 crc kubenswrapper[4925]: I0121 12:07:37.575530 4925 generic.go:334] "Generic (PLEG): container finished" podID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerID="8a26f00da38e97db9e9d7c2ee8303b8e6fbe3017ba764cae752b2ea33817ecaf" exitCode=0
Jan 21 12:07:37 crc kubenswrapper[4925]: I0121 12:07:37.575602 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lm88" event={"ID":"9a801ac9-da75-4c2e-a16b-90f246211e18","Type":"ContainerDied","Data":"8a26f00da38e97db9e9d7c2ee8303b8e6fbe3017ba764cae752b2ea33817ecaf"}
Jan 21 12:07:40 crc kubenswrapper[4925]: I0121 12:07:40.620216 4925 generic.go:334] "Generic (PLEG): container finished" podID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerID="3c4b76a34643116d154ef4fae680c8f7541a63cf492dd66cbb298071f6db4917" exitCode=0
Jan 21 12:07:40 crc kubenswrapper[4925]: I0121 12:07:40.620356 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lm88" event={"ID":"9a801ac9-da75-4c2e-a16b-90f246211e18","Type":"ContainerDied","Data":"3c4b76a34643116d154ef4fae680c8f7541a63cf492dd66cbb298071f6db4917"}
Jan 21 12:07:43 crc kubenswrapper[4925]: I0121 12:07:43.647274 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lm88" event={"ID":"9a801ac9-da75-4c2e-a16b-90f246211e18","Type":"ContainerStarted","Data":"be87a68f990409285bcb4c59b21dbf033bdcbdfc0ec43e76e2e75b434a25bc7c"}
Jan 21 12:07:43 crc kubenswrapper[4925]: I0121 12:07:43.674043 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8lm88" podStartSLOduration=3.979226503 podStartE2EDuration="8.674004924s" podCreationTimestamp="2026-01-21 12:07:35 +0000 UTC" firstStartedPulling="2026-01-21 12:07:37.581362315 +0000 UTC m=+4349.185254249" lastFinishedPulling="2026-01-21 12:07:42.276140726 +0000 UTC m=+4353.880032670" observedRunningTime="2026-01-21 12:07:43.667041924 +0000 UTC m=+4355.270933858" watchObservedRunningTime="2026-01-21 12:07:43.674004924 +0000 UTC m=+4355.277896858"
Jan 21 12:07:45 crc kubenswrapper[4925]: I0121 12:07:45.492800 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:45 crc kubenswrapper[4925]: I0121 12:07:45.494087 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:45 crc kubenswrapper[4925]: I0121 12:07:45.549236 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:49 crc kubenswrapper[4925]: I0121 12:07:49.941183 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 12:07:49 crc kubenswrapper[4925]: I0121 12:07:49.941659 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 12:07:55 crc kubenswrapper[4925]: I0121 12:07:55.557308 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:07:59 crc kubenswrapper[4925]: I0121 12:07:59.141366 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lm88"]
Jan 21 12:07:59 crc kubenswrapper[4925]: I0121 12:07:59.142087 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8lm88" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerName="registry-server" containerID="cri-o://be87a68f990409285bcb4c59b21dbf033bdcbdfc0ec43e76e2e75b434a25bc7c" gracePeriod=2
Jan 21 12:07:59 crc kubenswrapper[4925]: I0121 12:07:59.895046 4925 generic.go:334] "Generic (PLEG): container finished" podID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerID="be87a68f990409285bcb4c59b21dbf033bdcbdfc0ec43e76e2e75b434a25bc7c" exitCode=0
Jan 21 12:07:59 crc kubenswrapper[4925]: I0121 12:07:59.895147 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lm88" event={"ID":"9a801ac9-da75-4c2e-a16b-90f246211e18","Type":"ContainerDied","Data":"be87a68f990409285bcb4c59b21dbf033bdcbdfc0ec43e76e2e75b434a25bc7c"}
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.158347 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.349448 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-catalog-content\") pod \"9a801ac9-da75-4c2e-a16b-90f246211e18\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") "
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.349540 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcx6w\" (UniqueName: \"kubernetes.io/projected/9a801ac9-da75-4c2e-a16b-90f246211e18-kube-api-access-tcx6w\") pod \"9a801ac9-da75-4c2e-a16b-90f246211e18\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") "
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.349729 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-utilities\") pod \"9a801ac9-da75-4c2e-a16b-90f246211e18\" (UID: \"9a801ac9-da75-4c2e-a16b-90f246211e18\") "
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.350706 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-utilities" (OuterVolumeSpecName: "utilities") pod "9a801ac9-da75-4c2e-a16b-90f246211e18" (UID: "9a801ac9-da75-4c2e-a16b-90f246211e18"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.356932 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a801ac9-da75-4c2e-a16b-90f246211e18-kube-api-access-tcx6w" (OuterVolumeSpecName: "kube-api-access-tcx6w") pod "9a801ac9-da75-4c2e-a16b-90f246211e18" (UID: "9a801ac9-da75-4c2e-a16b-90f246211e18"). InnerVolumeSpecName "kube-api-access-tcx6w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.402060 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a801ac9-da75-4c2e-a16b-90f246211e18" (UID: "9a801ac9-da75-4c2e-a16b-90f246211e18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.451573 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.451623 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcx6w\" (UniqueName: \"kubernetes.io/projected/9a801ac9-da75-4c2e-a16b-90f246211e18-kube-api-access-tcx6w\") on node \"crc\" DevicePath \"\""
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.451639 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a801ac9-da75-4c2e-a16b-90f246211e18-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.907922 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8lm88" event={"ID":"9a801ac9-da75-4c2e-a16b-90f246211e18","Type":"ContainerDied","Data":"2ff4e8976fb7d48c7de8196ac094a7c2bb36572de1f4375d2e4ec65f16d31dd6"}
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.907998 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8lm88"
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.908016 4925 scope.go:117] "RemoveContainer" containerID="be87a68f990409285bcb4c59b21dbf033bdcbdfc0ec43e76e2e75b434a25bc7c"
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.936067 4925 scope.go:117] "RemoveContainer" containerID="3c4b76a34643116d154ef4fae680c8f7541a63cf492dd66cbb298071f6db4917"
Jan 21 12:08:00 crc kubenswrapper[4925]: I0121 12:08:00.959868 4925 scope.go:117] "RemoveContainer" containerID="8a26f00da38e97db9e9d7c2ee8303b8e6fbe3017ba764cae752b2ea33817ecaf"
Jan 21 12:08:01 crc kubenswrapper[4925]: I0121 12:08:01.005607 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lm88"]
Jan 21 12:08:01 crc kubenswrapper[4925]: I0121 12:08:01.020503 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8lm88"]
Jan 21 12:08:01 crc kubenswrapper[4925]: I0121 12:08:01.513773 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" path="/var/lib/kubelet/pods/9a801ac9-da75-4c2e-a16b-90f246211e18/volumes"
Jan 21 12:08:19 crc kubenswrapper[4925]: I0121 12:08:19.940698 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 12:08:19 crc kubenswrapper[4925]: I0121 12:08:19.941328 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 12:08:49 crc kubenswrapper[4925]: I0121 12:08:49.940605 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 12:08:49 crc kubenswrapper[4925]: I0121 12:08:49.941236 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 12:08:49 crc kubenswrapper[4925]: I0121 12:08:49.941320 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q"
Jan 21 12:08:49 crc kubenswrapper[4925]: I0121 12:08:49.941977 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c15563ed1b90cc9b43047b4b30bdf9933c139f2692efbcaa33312f88a94bed10"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 21 12:08:49 crc kubenswrapper[4925]: I0121 12:08:49.942077 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://c15563ed1b90cc9b43047b4b30bdf9933c139f2692efbcaa33312f88a94bed10" gracePeriod=600
Jan 21 12:08:50 crc kubenswrapper[4925]: I0121 12:08:50.473102 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="c15563ed1b90cc9b43047b4b30bdf9933c139f2692efbcaa33312f88a94bed10" exitCode=0
Jan 21 12:08:50 crc kubenswrapper[4925]: I0121 12:08:50.473200 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"c15563ed1b90cc9b43047b4b30bdf9933c139f2692efbcaa33312f88a94bed10"}
Jan 21 12:08:50 crc kubenswrapper[4925]: I0121 12:08:50.473563 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"}
Jan 21 12:08:50 crc kubenswrapper[4925]: I0121 12:08:50.473602 4925 scope.go:117] "RemoveContainer" containerID="b68c4942f94f417bb545a1286e2edb5bceff977b3f1f02850cec8a8b7aff9875"
Jan 21 12:11:19 crc kubenswrapper[4925]: I0121 12:11:19.941729 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 12:11:19 crc kubenswrapper[4925]: I0121 12:11:19.942497 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 12:11:49 crc kubenswrapper[4925]: I0121 12:11:49.940859 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 12:11:49 crc kubenswrapper[4925]: I0121 12:11:49.941531 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 12:12:19 crc kubenswrapper[4925]: I0121 12:12:19.941564 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 12:12:19 crc kubenswrapper[4925]: I0121 12:12:19.942268 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 12:12:19 crc kubenswrapper[4925]: I0121 12:12:19.942344 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q"
Jan 21 12:12:19 crc kubenswrapper[4925]: I0121 12:12:19.943298 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 21 12:12:19 crc kubenswrapper[4925]: I0121 12:12:19.943411 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" gracePeriod=600
Jan 21 12:12:21 crc kubenswrapper[4925]: E0121 12:12:21.353594 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:12:21 crc kubenswrapper[4925]: I0121 12:12:21.690856 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" exitCode=0
Jan 21 12:12:21 crc kubenswrapper[4925]: I0121 12:12:21.690936 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"}
Jan 21 12:12:21 crc kubenswrapper[4925]: I0121 12:12:21.691096 4925 scope.go:117] "RemoveContainer" containerID="c15563ed1b90cc9b43047b4b30bdf9933c139f2692efbcaa33312f88a94bed10"
Jan 21 12:12:21 crc kubenswrapper[4925]: I0121 12:12:21.691787 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:12:21 crc kubenswrapper[4925]: E0121 12:12:21.692100 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:12:34 crc kubenswrapper[4925]: I0121 12:12:34.502973 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:12:34 crc kubenswrapper[4925]: E0121 12:12:34.503833 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:12:49 crc kubenswrapper[4925]: I0121 12:12:49.619232 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:12:49 crc kubenswrapper[4925]: E0121 12:12:49.620065 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:13:01 crc kubenswrapper[4925]: I0121 12:13:01.501646 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:13:01 crc kubenswrapper[4925]: E0121 12:13:01.502433 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:13:16 crc kubenswrapper[4925]: I0121 12:13:16.502075 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:13:16 crc kubenswrapper[4925]: E0121 12:13:16.502894 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:13:31 crc kubenswrapper[4925]: I0121 12:13:31.525441 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:13:31 crc kubenswrapper[4925]: E0121 12:13:31.526936 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:13:46 crc kubenswrapper[4925]: I0121 12:13:46.502206 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:13:46 crc kubenswrapper[4925]: E0121 12:13:46.503267 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:13:57 crc kubenswrapper[4925]: I0121 12:13:57.502647 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:13:57 crc kubenswrapper[4925]: E0121 12:13:57.503529 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:14:10 crc kubenswrapper[4925]: I0121 12:14:10.502867 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:14:10 crc kubenswrapper[4925]: E0121 12:14:10.503916 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:14:22 crc kubenswrapper[4925]: I0121 12:14:22.506218 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:14:22 crc kubenswrapper[4925]: E0121 12:14:22.508227 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:14:33 crc kubenswrapper[4925]: I0121 12:14:33.515248 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:14:33 crc kubenswrapper[4925]: E0121 12:14:33.516149 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:14:45 crc kubenswrapper[4925]: I0121 12:14:45.501733 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:14:45 crc kubenswrapper[4925]: E0121 12:14:45.505931 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.186997 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"]
Jan 21 12:15:00 crc kubenswrapper[4925]: E0121 12:15:00.192006 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerName="registry-server"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.192040 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerName="registry-server"
Jan 21 12:15:00 crc kubenswrapper[4925]: E0121 12:15:00.192063 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerName="extract-content"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.192069 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerName="extract-content"
Jan 21 12:15:00 crc kubenswrapper[4925]: E0121 12:15:00.192099 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerName="extract-utilities"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.192106 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerName="extract-utilities"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.192281 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a801ac9-da75-4c2e-a16b-90f246211e18" containerName="registry-server"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.193141 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.197826 4925 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.198913 4925 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.208321 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"]
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.390331 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vplc\" (UniqueName: \"kubernetes.io/projected/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-kube-api-access-5vplc\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.390478 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-secret-volume\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.390843 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-config-volume\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.492566 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-config-volume\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.492635 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vplc\" (UniqueName: \"kubernetes.io/projected/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-kube-api-access-5vplc\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.492683 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-secret-volume\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.493958 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-config-volume\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.500615 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-secret-volume\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.501748 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:15:00 crc kubenswrapper[4925]: E0121 12:15:00.502023 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.524561 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vplc\" (UniqueName: \"kubernetes.io/projected/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-kube-api-access-5vplc\") pod \"collect-profiles-29483295-xjvfh\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:00 crc kubenswrapper[4925]: I0121 12:15:00.535467 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:01 crc kubenswrapper[4925]: I0121 12:15:01.264858 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"]
Jan 21 12:15:01 crc kubenswrapper[4925]: I0121 12:15:01.329593 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh" event={"ID":"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab","Type":"ContainerStarted","Data":"ce2217abe1125126ab48ab96988c5aea526c1930441a56fbf092330f6a73426e"}
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.190378 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6fv5w"]
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.193697 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.205198 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6fv5w"]
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.458011 4925 generic.go:334] "Generic (PLEG): container finished" podID="ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab" containerID="376c5bf421e41b225b96fed5eae184e3ab8386330682b647e19e662f94996202" exitCode=0
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.458065 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh" event={"ID":"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab","Type":"ContainerDied","Data":"376c5bf421e41b225b96fed5eae184e3ab8386330682b647e19e662f94996202"}
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.530636 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-catalog-content\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.530865 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfnmx\" (UniqueName: \"kubernetes.io/projected/f9cae190-2552-436f-80de-7632f1a966c4-kube-api-access-hfnmx\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.531064 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-utilities\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.634641 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-catalog-content\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.634713 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfnmx\" (UniqueName: \"kubernetes.io/projected/f9cae190-2552-436f-80de-7632f1a966c4-kube-api-access-hfnmx\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.634791 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-utilities\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.635647 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-utilities\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.635761 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-catalog-content\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.660816 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfnmx\" (UniqueName: \"kubernetes.io/projected/f9cae190-2552-436f-80de-7632f1a966c4-kube-api-access-hfnmx\") pod \"redhat-operators-6fv5w\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") " pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:04 crc kubenswrapper[4925]: I0121 12:15:04.818013 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:05 crc kubenswrapper[4925]: I0121 12:15:05.444102 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6fv5w"]
Jan 21 12:15:05 crc kubenswrapper[4925]: I0121 12:15:05.468841 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fv5w" event={"ID":"f9cae190-2552-436f-80de-7632f1a966c4","Type":"ContainerStarted","Data":"d64ce95768bbec2d0ef931b8a01c100c5d39102fc22674e97b22795f69b1c1ee"}
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.482490 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh" event={"ID":"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab","Type":"ContainerDied","Data":"ce2217abe1125126ab48ab96988c5aea526c1930441a56fbf092330f6a73426e"}
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.482881 4925 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce2217abe1125126ab48ab96988c5aea526c1930441a56fbf092330f6a73426e"
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.494038 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.584231 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-secret-volume\") pod \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") "
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.584327 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-config-volume\") pod \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") "
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.584374 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vplc\" (UniqueName: \"kubernetes.io/projected/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-kube-api-access-5vplc\") pod \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\" (UID: \"ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab\") "
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.585106 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-config-volume" (OuterVolumeSpecName: "config-volume") pod "ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab" (UID: "ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.585670 4925 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-config-volume\") on node \"crc\" DevicePath \"\""
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.590867 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-kube-api-access-5vplc" (OuterVolumeSpecName: "kube-api-access-5vplc") pod "ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab" (UID: "ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab"). InnerVolumeSpecName "kube-api-access-5vplc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.592322 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab" (UID: "ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.687225 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vplc\" (UniqueName: \"kubernetes.io/projected/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-kube-api-access-5vplc\") on node \"crc\" DevicePath \"\""
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:06.687260 4925 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:07.498592 4925 generic.go:334] "Generic (PLEG): container finished" podID="f9cae190-2552-436f-80de-7632f1a966c4" containerID="855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d" exitCode=0
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:07.498923 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483295-xjvfh"
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:07.499586 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fv5w" event={"ID":"f9cae190-2552-436f-80de-7632f1a966c4","Type":"ContainerDied","Data":"855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d"}
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:07.636800 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9"]
Jan 21 12:15:07 crc kubenswrapper[4925]: I0121 12:15:07.663381 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483250-dhdz9"]
Jan 21 12:15:08 crc kubenswrapper[4925]: I0121 12:15:08.509208 4925 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 21 12:15:09 crc kubenswrapper[4925]: I0121 12:15:09.540441 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5195f4a-967f-42f5-9402-9d4473fb49c5" path="/var/lib/kubelet/pods/e5195f4a-967f-42f5-9402-9d4473fb49c5/volumes"
Jan 21 12:15:11 crc kubenswrapper[4925]: I0121 12:15:11.581269 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fv5w" event={"ID":"f9cae190-2552-436f-80de-7632f1a966c4","Type":"ContainerStarted","Data":"037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59"}
Jan 21 12:15:13 crc kubenswrapper[4925]: I0121 12:15:13.731590 4925 generic.go:334] "Generic (PLEG): container finished" podID="f9cae190-2552-436f-80de-7632f1a966c4" containerID="037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59" exitCode=0
Jan 21 12:15:13 crc kubenswrapper[4925]: I0121 12:15:13.731692 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fv5w" event={"ID":"f9cae190-2552-436f-80de-7632f1a966c4","Type":"ContainerDied","Data":"037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59"}
Jan 21 12:15:14 crc kubenswrapper[4925]: I0121 12:15:14.502257 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:15:14 crc kubenswrapper[4925]: E0121 12:15:14.502782 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:15:15 crc kubenswrapper[4925]: I0121 12:15:15.753625 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fv5w" event={"ID":"f9cae190-2552-436f-80de-7632f1a966c4","Type":"ContainerStarted","Data":"4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343"}
Jan 21 12:15:15 crc kubenswrapper[4925]: I0121 12:15:15.781465 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6fv5w" podStartSLOduration=5.304643825 podStartE2EDuration="11.781423965s" podCreationTimestamp="2026-01-21 12:15:04 +0000 UTC" firstStartedPulling="2026-01-21 12:15:08.508804336 +0000 UTC m=+4800.112696280" lastFinishedPulling="2026-01-21 12:15:14.985584485 +0000 UTC m=+4806.589476420" observedRunningTime="2026-01-21 12:15:15.775700235 +0000 UTC m=+4807.379592179" watchObservedRunningTime="2026-01-21 12:15:15.781423965 +0000 UTC m=+4807.385315899"
Jan 21 12:15:24 crc kubenswrapper[4925]: I0121 12:15:24.819162 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:24 crc kubenswrapper[4925]: I0121 12:15:24.821097 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:24 crc kubenswrapper[4925]: I0121 12:15:24.872421 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:25 crc kubenswrapper[4925]: I0121 12:15:25.913366 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:26 crc kubenswrapper[4925]: I0121 12:15:26.502150 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a"
Jan 21 12:15:26 crc kubenswrapper[4925]: E0121 12:15:26.502508 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a"
Jan 21 12:15:28 crc kubenswrapper[4925]: I0121 12:15:28.374321 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6fv5w"]
Jan 21 12:15:28 crc kubenswrapper[4925]: I0121 12:15:28.375024 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6fv5w" podUID="f9cae190-2552-436f-80de-7632f1a966c4" containerName="registry-server" containerID="cri-o://4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343" gracePeriod=2
Jan 21 12:15:28 crc kubenswrapper[4925]: E0121 12:15:28.720324 4925 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9cae190_2552_436f_80de_7632f1a966c4.slice/crio-4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343.scope\": RecentStats: unable to find data in memory cache]"
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.541127 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.731621 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfnmx\" (UniqueName: \"kubernetes.io/projected/f9cae190-2552-436f-80de-7632f1a966c4-kube-api-access-hfnmx\") pod \"f9cae190-2552-436f-80de-7632f1a966c4\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") "
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.732496 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-utilities\") pod \"f9cae190-2552-436f-80de-7632f1a966c4\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") "
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.732823 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-catalog-content\") pod \"f9cae190-2552-436f-80de-7632f1a966c4\" (UID: \"f9cae190-2552-436f-80de-7632f1a966c4\") "
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.734714 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-utilities" (OuterVolumeSpecName: "utilities") pod "f9cae190-2552-436f-80de-7632f1a966c4" (UID: "f9cae190-2552-436f-80de-7632f1a966c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.740360 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9cae190-2552-436f-80de-7632f1a966c4-kube-api-access-hfnmx" (OuterVolumeSpecName: "kube-api-access-hfnmx") pod "f9cae190-2552-436f-80de-7632f1a966c4" (UID: "f9cae190-2552-436f-80de-7632f1a966c4"). InnerVolumeSpecName "kube-api-access-hfnmx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.835314 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.835359 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfnmx\" (UniqueName: \"kubernetes.io/projected/f9cae190-2552-436f-80de-7632f1a966c4-kube-api-access-hfnmx\") on node \"crc\" DevicePath \"\""
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.887961 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9cae190-2552-436f-80de-7632f1a966c4" (UID: "f9cae190-2552-436f-80de-7632f1a966c4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.936782 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cae190-2552-436f-80de-7632f1a966c4-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.941455 4925 generic.go:334] "Generic (PLEG): container finished" podID="f9cae190-2552-436f-80de-7632f1a966c4" containerID="4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343" exitCode=0
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.941513 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fv5w" event={"ID":"f9cae190-2552-436f-80de-7632f1a966c4","Type":"ContainerDied","Data":"4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343"}
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.941554 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fv5w" event={"ID":"f9cae190-2552-436f-80de-7632f1a966c4","Type":"ContainerDied","Data":"d64ce95768bbec2d0ef931b8a01c100c5d39102fc22674e97b22795f69b1c1ee"}
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.941574 4925 scope.go:117] "RemoveContainer" containerID="4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343"
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.941523 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6fv5w"
Jan 21 12:15:29 crc kubenswrapper[4925]: I0121 12:15:29.998675 4925 scope.go:117] "RemoveContainer" containerID="037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59"
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.000484 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6fv5w"]
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.007785 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6fv5w"]
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.022613 4925 scope.go:117] "RemoveContainer" containerID="855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d"
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.057440 4925 scope.go:117] "RemoveContainer" containerID="4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343"
Jan 21 12:15:30 crc kubenswrapper[4925]: E0121 12:15:30.058257 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343\": container with ID starting with 4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343 not found: ID does not exist" containerID="4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343"
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.058311 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343"} err="failed to get container status \"4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343\": rpc error: code = NotFound desc = could not find container \"4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343\": container with ID starting with 4a0460758447d906baf71d8dd660fbd7b94c178d2b853fc8f320bfc34c7ba343 not found: ID does not exist"
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.059434 4925 scope.go:117] "RemoveContainer" containerID="037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59"
Jan 21 12:15:30 crc kubenswrapper[4925]: E0121 12:15:30.059878 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59\": container with ID starting with 037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59 not found: ID does not exist" containerID="037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59"
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.059922 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59"} err="failed to get container status \"037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59\": rpc error: code = NotFound desc = could not find container \"037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59\": container with ID starting with 037c6aadc0e7b97675f3b587841de93d94b75b337f172cf205ec9b64f404de59 not found: ID does not exist"
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.059942 4925 scope.go:117] "RemoveContainer" containerID="855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d"
Jan 21 12:15:30 crc kubenswrapper[4925]: E0121 12:15:30.060465 4925 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d\": container with ID starting with 855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d not found: ID does not exist" containerID="855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d"
Jan 21 12:15:30 crc kubenswrapper[4925]: I0121 12:15:30.060595 4925 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d"} err="failed to get container status \"855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d\": rpc error: code = NotFound desc = could not find container \"855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d\": container with ID starting with 855f4a2b6a04b30f5d9157b3f58d0b60d5f9e9335435ab19a36a2785a56acb3d not found: ID does not exist"
Jan 21 12:15:31 crc kubenswrapper[4925]: I0121 12:15:31.517815 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9cae190-2552-436f-80de-7632f1a966c4" path="/var/lib/kubelet/pods/f9cae190-2552-436f-80de-7632f1a966c4/volumes"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.018342 4925 scope.go:117] "RemoveContainer" containerID="8b13216693667775f7b24ef39973fad9d1b6f53a54e22559bbe0f6771524b418"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.213903 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fcbvk"]
Jan 21 12:15:35 crc kubenswrapper[4925]: E0121 12:15:35.214684 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9cae190-2552-436f-80de-7632f1a966c4" containerName="extract-utilities"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.214714 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9cae190-2552-436f-80de-7632f1a966c4" containerName="extract-utilities"
Jan 21 12:15:35 crc kubenswrapper[4925]: E0121 12:15:35.214730 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9cae190-2552-436f-80de-7632f1a966c4" containerName="registry-server"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.214740 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9cae190-2552-436f-80de-7632f1a966c4" containerName="registry-server"
Jan 21 12:15:35 crc kubenswrapper[4925]: E0121 12:15:35.214761 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab" containerName="collect-profiles"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.214769 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab" containerName="collect-profiles"
Jan 21 12:15:35 crc kubenswrapper[4925]: E0121 12:15:35.214796 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9cae190-2552-436f-80de-7632f1a966c4" containerName="extract-content"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.214805 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9cae190-2552-436f-80de-7632f1a966c4" containerName="extract-content"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.215076 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9cae190-2552-436f-80de-7632f1a966c4" containerName="registry-server"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.215123 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad9fb0e6-2a5f-4699-8a43-bb7cc8bc05ab" containerName="collect-profiles"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.217930 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fcbvk"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.232752 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fcbvk"]
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.393098 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gw9m\" (UniqueName: \"kubernetes.io/projected/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-kube-api-access-8gw9m\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.393194 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-utilities\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.393578 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-catalog-content\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk"
Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.495516 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gw9m\" (UniqueName: \"kubernetes.io/projected/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-kube-api-access-8gw9m\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk"
Jan 21 12:15:35 crc
kubenswrapper[4925]: I0121 12:15:35.495606 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-utilities\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.495727 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-catalog-content\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.496298 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-utilities\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.496328 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-catalog-content\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.517296 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gw9m\" (UniqueName: \"kubernetes.io/projected/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-kube-api-access-8gw9m\") pod \"certified-operators-fcbvk\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:35 crc kubenswrapper[4925]: I0121 12:15:35.546387 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:36 crc kubenswrapper[4925]: I0121 12:15:36.379157 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fcbvk"] Jan 21 12:15:36 crc kubenswrapper[4925]: W0121 12:15:36.386227 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod459e81a2_bdab_4351_9fec_f1a2fc6ab61f.slice/crio-0646babc7d79bf2554bdb652aa97a48a5ceba757146c862a34d0772426a48b10 WatchSource:0}: Error finding container 0646babc7d79bf2554bdb652aa97a48a5ceba757146c862a34d0772426a48b10: Status 404 returned error can't find the container with id 0646babc7d79bf2554bdb652aa97a48a5ceba757146c862a34d0772426a48b10 Jan 21 12:15:37 crc kubenswrapper[4925]: I0121 12:15:37.117170 4925 generic.go:334] "Generic (PLEG): container finished" podID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerID="a8b37e371355aa0f953cb0435864fb809cda8c4a179109454fb748b91078ae39" exitCode=0 Jan 21 12:15:37 crc kubenswrapper[4925]: I0121 12:15:37.117230 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcbvk" event={"ID":"459e81a2-bdab-4351-9fec-f1a2fc6ab61f","Type":"ContainerDied","Data":"a8b37e371355aa0f953cb0435864fb809cda8c4a179109454fb748b91078ae39"} Jan 21 12:15:37 crc kubenswrapper[4925]: I0121 12:15:37.117260 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcbvk" event={"ID":"459e81a2-bdab-4351-9fec-f1a2fc6ab61f","Type":"ContainerStarted","Data":"0646babc7d79bf2554bdb652aa97a48a5ceba757146c862a34d0772426a48b10"} Jan 21 12:15:39 crc kubenswrapper[4925]: I0121 12:15:39.138784 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcbvk" event={"ID":"459e81a2-bdab-4351-9fec-f1a2fc6ab61f","Type":"ContainerStarted","Data":"9891db6da03c21625c606c07885503a56ed91098e865081fc44587fed240039f"} Jan 21 12:15:40 crc kubenswrapper[4925]: I0121 12:15:40.151090 4925 generic.go:334] "Generic (PLEG): container finished" podID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerID="9891db6da03c21625c606c07885503a56ed91098e865081fc44587fed240039f" exitCode=0 Jan 21 12:15:40 crc kubenswrapper[4925]: I0121 12:15:40.151150 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcbvk" event={"ID":"459e81a2-bdab-4351-9fec-f1a2fc6ab61f","Type":"ContainerDied","Data":"9891db6da03c21625c606c07885503a56ed91098e865081fc44587fed240039f"} Jan 21 12:15:41 crc kubenswrapper[4925]: I0121 12:15:41.558024 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:15:41 crc kubenswrapper[4925]: E0121 12:15:41.558965 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:15:42 crc kubenswrapper[4925]: I0121 12:15:42.322811 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcbvk" 
event={"ID":"459e81a2-bdab-4351-9fec-f1a2fc6ab61f","Type":"ContainerStarted","Data":"467e11ddd2e4d10d8ab5860833d6a26e4f0ebf8739cfa6f3607e17252a26b301"} Jan 21 12:15:42 crc kubenswrapper[4925]: I0121 12:15:42.354288 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fcbvk" podStartSLOduration=3.5635158369999997 podStartE2EDuration="7.354244911s" podCreationTimestamp="2026-01-21 12:15:35 +0000 UTC" firstStartedPulling="2026-01-21 12:15:37.119160627 +0000 UTC m=+4828.723052561" lastFinishedPulling="2026-01-21 12:15:40.909889701 +0000 UTC m=+4832.513781635" observedRunningTime="2026-01-21 12:15:42.350082649 +0000 UTC m=+4833.953974603" watchObservedRunningTime="2026-01-21 12:15:42.354244911 +0000 UTC m=+4833.958136865" Jan 21 12:15:45 crc kubenswrapper[4925]: I0121 12:15:45.547250 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:45 crc kubenswrapper[4925]: I0121 12:15:45.547683 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:45 crc kubenswrapper[4925]: I0121 12:15:45.598627 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:46 crc kubenswrapper[4925]: I0121 12:15:46.410582 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:49 crc kubenswrapper[4925]: I0121 12:15:49.174511 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fcbvk"] Jan 21 12:15:49 crc kubenswrapper[4925]: I0121 12:15:49.175162 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fcbvk" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerName="registry-server" containerID="cri-o://467e11ddd2e4d10d8ab5860833d6a26e4f0ebf8739cfa6f3607e17252a26b301" gracePeriod=2 Jan 21 12:15:50 crc kubenswrapper[4925]: I0121 12:15:50.398848 4925 generic.go:334] "Generic (PLEG): container finished" podID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerID="467e11ddd2e4d10d8ab5860833d6a26e4f0ebf8739cfa6f3607e17252a26b301" exitCode=0 Jan 21 12:15:50 crc kubenswrapper[4925]: I0121 12:15:50.398918 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcbvk" event={"ID":"459e81a2-bdab-4351-9fec-f1a2fc6ab61f","Type":"ContainerDied","Data":"467e11ddd2e4d10d8ab5860833d6a26e4f0ebf8739cfa6f3607e17252a26b301"} Jan 21 12:15:50 crc kubenswrapper[4925]: I0121 12:15:50.891629 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.030552 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-catalog-content\") pod \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.031084 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-utilities\") pod \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.031229 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gw9m\" (UniqueName: \"kubernetes.io/projected/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-kube-api-access-8gw9m\") pod \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\" (UID: \"459e81a2-bdab-4351-9fec-f1a2fc6ab61f\") " Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.032040 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-utilities" (OuterVolumeSpecName: "utilities") pod "459e81a2-bdab-4351-9fec-f1a2fc6ab61f" (UID: "459e81a2-bdab-4351-9fec-f1a2fc6ab61f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.051071 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-kube-api-access-8gw9m" (OuterVolumeSpecName: "kube-api-access-8gw9m") pod "459e81a2-bdab-4351-9fec-f1a2fc6ab61f" (UID: "459e81a2-bdab-4351-9fec-f1a2fc6ab61f"). InnerVolumeSpecName "kube-api-access-8gw9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.082112 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "459e81a2-bdab-4351-9fec-f1a2fc6ab61f" (UID: "459e81a2-bdab-4351-9fec-f1a2fc6ab61f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.134223 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.134267 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.134280 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gw9m\" (UniqueName: \"kubernetes.io/projected/459e81a2-bdab-4351-9fec-f1a2fc6ab61f-kube-api-access-8gw9m\") on node \"crc\" DevicePath \"\"" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.411321 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcbvk" event={"ID":"459e81a2-bdab-4351-9fec-f1a2fc6ab61f","Type":"ContainerDied","Data":"0646babc7d79bf2554bdb652aa97a48a5ceba757146c862a34d0772426a48b10"} Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.411446 4925 scope.go:117] "RemoveContainer" containerID="467e11ddd2e4d10d8ab5860833d6a26e4f0ebf8739cfa6f3607e17252a26b301" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.411449 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fcbvk" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.439094 4925 scope.go:117] "RemoveContainer" containerID="9891db6da03c21625c606c07885503a56ed91098e865081fc44587fed240039f" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.451822 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fcbvk"] Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.459205 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fcbvk"] Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.479224 4925 scope.go:117] "RemoveContainer" containerID="a8b37e371355aa0f953cb0435864fb809cda8c4a179109454fb748b91078ae39" Jan 21 12:15:51 crc kubenswrapper[4925]: I0121 12:15:51.513109 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" path="/var/lib/kubelet/pods/459e81a2-bdab-4351-9fec-f1a2fc6ab61f/volumes" Jan 21 12:15:52 crc kubenswrapper[4925]: I0121 12:15:52.503761 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:15:52 crc kubenswrapper[4925]: E0121 12:15:52.504025 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:16:06 crc kubenswrapper[4925]: I0121 12:16:06.502276 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:16:06 crc kubenswrapper[4925]: E0121 12:16:06.502972 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:16:21 crc kubenswrapper[4925]: I0121 12:16:21.502528 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:16:21 crc kubenswrapper[4925]: E0121 12:16:21.503053 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.789916 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pwlgx"] Jan 21 12:16:24 crc kubenswrapper[4925]: E0121 12:16:24.791062 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerName="extract-content" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.791085 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerName="extract-content" Jan 21 12:16:24 crc kubenswrapper[4925]: E0121 12:16:24.791106 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerName="registry-server" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.791115 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerName="registry-server" Jan 21 12:16:24 crc kubenswrapper[4925]: E0121 12:16:24.791137 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerName="extract-utilities" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.791147 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerName="extract-utilities" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.791425 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="459e81a2-bdab-4351-9fec-f1a2fc6ab61f" containerName="registry-server" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.793186 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.816403 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pwlgx"] Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.866232 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-catalog-content\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.866449 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-utilities\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.866482 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dtjf\" (UniqueName: \"kubernetes.io/projected/1aa431d7-40f3-4155-bee1-463f781910d3-kube-api-access-5dtjf\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.968161 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-catalog-content\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.968336 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-utilities\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.968411 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dtjf\" (UniqueName: \"kubernetes.io/projected/1aa431d7-40f3-4155-bee1-463f781910d3-kube-api-access-5dtjf\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.972576 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-catalog-content\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.973578 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-utilities\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:24 crc kubenswrapper[4925]: I0121 12:16:24.994693 4925 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5dtjf\" (UniqueName: \"kubernetes.io/projected/1aa431d7-40f3-4155-bee1-463f781910d3-kube-api-access-5dtjf\") pod \"community-operators-pwlgx\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:25 crc kubenswrapper[4925]: I0121 12:16:25.132346 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:25 crc kubenswrapper[4925]: I0121 12:16:25.655383 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pwlgx"] Jan 21 12:16:25 crc kubenswrapper[4925]: I0121 12:16:25.962229 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwlgx" event={"ID":"1aa431d7-40f3-4155-bee1-463f781910d3","Type":"ContainerStarted","Data":"d699aec9fad5acf8f058b11fc119a94fcd815999e7f97148823f7c0688683210"} Jan 21 12:16:28 crc kubenswrapper[4925]: I0121 12:16:28.989765 4925 generic.go:334] "Generic (PLEG): container finished" podID="1aa431d7-40f3-4155-bee1-463f781910d3" containerID="73afb2267d6f08860b9dd0664acdbf704ed7e5d7e8ea803f7283ca93766172c2" exitCode=0 Jan 21 12:16:28 crc kubenswrapper[4925]: I0121 12:16:28.989858 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwlgx" event={"ID":"1aa431d7-40f3-4155-bee1-463f781910d3","Type":"ContainerDied","Data":"73afb2267d6f08860b9dd0664acdbf704ed7e5d7e8ea803f7283ca93766172c2"} Jan 21 12:16:35 crc kubenswrapper[4925]: I0121 12:16:35.152936 4925 generic.go:334] "Generic (PLEG): container finished" podID="1aa431d7-40f3-4155-bee1-463f781910d3" containerID="54449c33233348c957f0644cef9103f5299edf5f20a1b60dc0a7a59f1875b251" exitCode=0 Jan 21 12:16:35 crc kubenswrapper[4925]: I0121 12:16:35.153808 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwlgx" event={"ID":"1aa431d7-40f3-4155-bee1-463f781910d3","Type":"ContainerDied","Data":"54449c33233348c957f0644cef9103f5299edf5f20a1b60dc0a7a59f1875b251"} Jan 21 12:16:35 crc kubenswrapper[4925]: I0121 12:16:35.502634 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:16:35 crc kubenswrapper[4925]: E0121 12:16:35.503023 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:16:37 crc kubenswrapper[4925]: I0121 12:16:37.185092 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwlgx" event={"ID":"1aa431d7-40f3-4155-bee1-463f781910d3","Type":"ContainerStarted","Data":"c822ddd6eb7c8a9ba5d9869c4cc3b2cbab0b60aae92a8e01a9a541e9f04250ba"} Jan 21 12:16:38 crc kubenswrapper[4925]: I0121 12:16:38.222918 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pwlgx" podStartSLOduration=7.690869721 podStartE2EDuration="14.22289311s" podCreationTimestamp="2026-01-21 12:16:24 +0000 UTC" firstStartedPulling="2026-01-21 12:16:29.999020956 +0000 UTC m=+4881.602912890" 
lastFinishedPulling="2026-01-21 12:16:36.531044345 +0000 UTC m=+4888.134936279" observedRunningTime="2026-01-21 12:16:38.216433857 +0000 UTC m=+4889.820325801" watchObservedRunningTime="2026-01-21 12:16:38.22289311 +0000 UTC m=+4889.826785044" Jan 21 12:16:45 crc kubenswrapper[4925]: I0121 12:16:45.133316 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:45 crc kubenswrapper[4925]: I0121 12:16:45.133831 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:45 crc kubenswrapper[4925]: I0121 12:16:45.197080 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:45 crc kubenswrapper[4925]: I0121 12:16:45.402102 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:48 crc kubenswrapper[4925]: I0121 12:16:48.776802 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pwlgx"] Jan 21 12:16:48 crc kubenswrapper[4925]: I0121 12:16:48.777512 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pwlgx" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" containerName="registry-server" containerID="cri-o://c822ddd6eb7c8a9ba5d9869c4cc3b2cbab0b60aae92a8e01a9a541e9f04250ba" gracePeriod=2 Jan 21 12:16:49 crc kubenswrapper[4925]: I0121 12:16:49.632149 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:16:49 crc kubenswrapper[4925]: E0121 12:16:49.632835 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:16:54 crc kubenswrapper[4925]: I0121 12:16:54.765530 4925 generic.go:334] "Generic (PLEG): container finished" podID="1aa431d7-40f3-4155-bee1-463f781910d3" containerID="c822ddd6eb7c8a9ba5d9869c4cc3b2cbab0b60aae92a8e01a9a541e9f04250ba" exitCode=0 Jan 21 12:16:54 crc kubenswrapper[4925]: I0121 12:16:54.765780 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwlgx" event={"ID":"1aa431d7-40f3-4155-bee1-463f781910d3","Type":"ContainerDied","Data":"c822ddd6eb7c8a9ba5d9869c4cc3b2cbab0b60aae92a8e01a9a541e9f04250ba"} Jan 21 12:16:54 crc kubenswrapper[4925]: I0121 12:16:54.977990 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.156039 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dtjf\" (UniqueName: \"kubernetes.io/projected/1aa431d7-40f3-4155-bee1-463f781910d3-kube-api-access-5dtjf\") pod \"1aa431d7-40f3-4155-bee1-463f781910d3\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.156378 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-catalog-content\") pod \"1aa431d7-40f3-4155-bee1-463f781910d3\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.156541 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-utilities\") pod \"1aa431d7-40f3-4155-bee1-463f781910d3\" (UID: \"1aa431d7-40f3-4155-bee1-463f781910d3\") " Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.157488 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-utilities" (OuterVolumeSpecName: "utilities") pod "1aa431d7-40f3-4155-bee1-463f781910d3" (UID: "1aa431d7-40f3-4155-bee1-463f781910d3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.166700 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aa431d7-40f3-4155-bee1-463f781910d3-kube-api-access-5dtjf" (OuterVolumeSpecName: "kube-api-access-5dtjf") pod "1aa431d7-40f3-4155-bee1-463f781910d3" (UID: "1aa431d7-40f3-4155-bee1-463f781910d3"). InnerVolumeSpecName "kube-api-access-5dtjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.226009 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1aa431d7-40f3-4155-bee1-463f781910d3" (UID: "1aa431d7-40f3-4155-bee1-463f781910d3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.259082 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dtjf\" (UniqueName: \"kubernetes.io/projected/1aa431d7-40f3-4155-bee1-463f781910d3-kube-api-access-5dtjf\") on node \"crc\" DevicePath \"\"" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.259138 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.259150 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa431d7-40f3-4155-bee1-463f781910d3-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.864872 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwlgx" event={"ID":"1aa431d7-40f3-4155-bee1-463f781910d3","Type":"ContainerDied","Data":"d699aec9fad5acf8f058b11fc119a94fcd815999e7f97148823f7c0688683210"} Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.864949 4925 scope.go:117] "RemoveContainer" containerID="c822ddd6eb7c8a9ba5d9869c4cc3b2cbab0b60aae92a8e01a9a541e9f04250ba" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.865088 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pwlgx" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.899290 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pwlgx"] Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.907022 4925 scope.go:117] "RemoveContainer" containerID="54449c33233348c957f0644cef9103f5299edf5f20a1b60dc0a7a59f1875b251" Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.929473 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pwlgx"] Jan 21 12:16:55 crc kubenswrapper[4925]: I0121 12:16:55.950746 4925 scope.go:117] "RemoveContainer" containerID="73afb2267d6f08860b9dd0664acdbf704ed7e5d7e8ea803f7283ca93766172c2" Jan 21 12:16:57 crc kubenswrapper[4925]: I0121 12:16:57.515905 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" path="/var/lib/kubelet/pods/1aa431d7-40f3-4155-bee1-463f781910d3/volumes" Jan 21 12:17:02 crc kubenswrapper[4925]: I0121 12:17:02.502147 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:17:02 crc kubenswrapper[4925]: E0121 12:17:02.502768 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:17:17 crc kubenswrapper[4925]: I0121 12:17:17.502372 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:17:17 crc kubenswrapper[4925]: E0121 12:17:17.503242 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:17:28 crc kubenswrapper[4925]: I0121 12:17:28.501925 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:17:30 crc kubenswrapper[4925]: I0121 12:17:30.073698 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"d8f2019edc393d954aef1b60893c29663652a31b121eed239192494936b483d9"} Jan 21 12:17:44 crc kubenswrapper[4925]: I0121 12:17:44.987235 4925 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wk565"] Jan 21 12:17:44 crc kubenswrapper[4925]: E0121 12:17:44.988292 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" containerName="registry-server" Jan 21 12:17:44 crc kubenswrapper[4925]: I0121 12:17:44.988316 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" containerName="registry-server" Jan 21 12:17:44 crc kubenswrapper[4925]: E0121 12:17:44.988368 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" containerName="extract-utilities" Jan 21 12:17:44 crc kubenswrapper[4925]: I0121 12:17:44.988375 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" containerName="extract-utilities" Jan 21 12:17:44 crc kubenswrapper[4925]: E0121 12:17:44.988420 4925 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" containerName="extract-content" Jan 21 12:17:44 crc kubenswrapper[4925]: I0121 12:17:44.988429 4925 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" containerName="extract-content" Jan 21 12:17:44 crc kubenswrapper[4925]: I0121 12:17:44.988605 4925 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aa431d7-40f3-4155-bee1-463f781910d3" containerName="registry-server" Jan 21 12:17:44 crc kubenswrapper[4925]: I0121 12:17:44.990027 4925 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.013748 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk565"] Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.110678 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-utilities\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.110760 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zqlm\" (UniqueName: \"kubernetes.io/projected/f699881b-8684-4a6a-879d-46fcd2c8af04-kube-api-access-7zqlm\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.111032 4925 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-catalog-content\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.215518 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zqlm\" (UniqueName: \"kubernetes.io/projected/f699881b-8684-4a6a-879d-46fcd2c8af04-kube-api-access-7zqlm\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.215906 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-catalog-content\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.216733 4925 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-utilities\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.216570 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-catalog-content\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.217104 4925 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-utilities\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.240435 4925 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7zqlm\" (UniqueName: \"kubernetes.io/projected/f699881b-8684-4a6a-879d-46fcd2c8af04-kube-api-access-7zqlm\") pod \"redhat-marketplace-wk565\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.315134 4925 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:17:45 crc kubenswrapper[4925]: I0121 12:17:45.653423 4925 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk565"] Jan 21 12:17:45 crc kubenswrapper[4925]: W0121 12:17:45.684210 4925 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf699881b_8684_4a6a_879d_46fcd2c8af04.slice/crio-6f0137b8bea6d0d8c2bf400c6c216e6d916b4003fd828dc84eef9cc7a4e33a68 WatchSource:0}: Error finding container 6f0137b8bea6d0d8c2bf400c6c216e6d916b4003fd828dc84eef9cc7a4e33a68: Status 404 returned error can't find the container with id 6f0137b8bea6d0d8c2bf400c6c216e6d916b4003fd828dc84eef9cc7a4e33a68 Jan 21 12:17:46 crc kubenswrapper[4925]: I0121 12:17:46.221842 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk565" event={"ID":"f699881b-8684-4a6a-879d-46fcd2c8af04","Type":"ContainerStarted","Data":"6f0137b8bea6d0d8c2bf400c6c216e6d916b4003fd828dc84eef9cc7a4e33a68"} Jan 21 12:17:47 crc kubenswrapper[4925]: I0121 12:17:47.231839 4925 generic.go:334] "Generic (PLEG): container finished" podID="f699881b-8684-4a6a-879d-46fcd2c8af04" containerID="0c89c255160387af67b2da632bdfce8a9b41514af0ae011f9f4a305fb39fe855" exitCode=0 Jan 21 12:17:47 crc kubenswrapper[4925]: I0121 12:17:47.231936 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk565" event={"ID":"f699881b-8684-4a6a-879d-46fcd2c8af04","Type":"ContainerDied","Data":"0c89c255160387af67b2da632bdfce8a9b41514af0ae011f9f4a305fb39fe855"} Jan 21 12:17:51 crc kubenswrapper[4925]: I0121 12:17:51.338624 4925 generic.go:334] "Generic (PLEG): container finished" podID="f699881b-8684-4a6a-879d-46fcd2c8af04" containerID="a7994c79436ab0cfe857cb630d7e5240f821c52760010bc69f46c41370b9c0ec" exitCode=0 Jan 21 12:17:51 crc kubenswrapper[4925]: I0121 12:17:51.338718 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk565" event={"ID":"f699881b-8684-4a6a-879d-46fcd2c8af04","Type":"ContainerDied","Data":"a7994c79436ab0cfe857cb630d7e5240f821c52760010bc69f46c41370b9c0ec"} Jan 21 12:17:55 crc kubenswrapper[4925]: I0121 12:17:55.382334 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk565" event={"ID":"f699881b-8684-4a6a-879d-46fcd2c8af04","Type":"ContainerStarted","Data":"cb28444a7a657de241d4093b8bf10618fff7cad1702caced5464edbd1d459b3e"} Jan 21 12:17:55 crc kubenswrapper[4925]: I0121 12:17:55.402923 4925 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wk565" podStartSLOduration=4.260160414 podStartE2EDuration="11.402873122s" podCreationTimestamp="2026-01-21 12:17:44 +0000 UTC" firstStartedPulling="2026-01-21 12:17:47.233917806 +0000 UTC m=+4958.837809740" lastFinishedPulling="2026-01-21 12:17:54.376630514 +0000 UTC m=+4965.980522448" observedRunningTime="2026-01-21 12:17:55.401067665 +0000 UTC m=+4967.004959599" 
watchObservedRunningTime="2026-01-21 12:17:55.402873122 +0000 UTC m=+4967.006765056" Jan 21 12:18:05 crc kubenswrapper[4925]: I0121 12:18:05.316072 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:18:05 crc kubenswrapper[4925]: I0121 12:18:05.317040 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:18:05 crc kubenswrapper[4925]: I0121 12:18:05.362780 4925 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:18:05 crc kubenswrapper[4925]: I0121 12:18:05.514360 4925 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:18:09 crc kubenswrapper[4925]: I0121 12:18:09.982570 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk565"] Jan 21 12:18:09 crc kubenswrapper[4925]: I0121 12:18:09.983738 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wk565" podUID="f699881b-8684-4a6a-879d-46fcd2c8af04" containerName="registry-server" containerID="cri-o://cb28444a7a657de241d4093b8bf10618fff7cad1702caced5464edbd1d459b3e" gracePeriod=2 Jan 21 12:18:10 crc kubenswrapper[4925]: I0121 12:18:10.518326 4925 generic.go:334] "Generic (PLEG): container finished" podID="f699881b-8684-4a6a-879d-46fcd2c8af04" containerID="cb28444a7a657de241d4093b8bf10618fff7cad1702caced5464edbd1d459b3e" exitCode=0 Jan 21 12:18:10 crc kubenswrapper[4925]: I0121 12:18:10.518409 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk565" event={"ID":"f699881b-8684-4a6a-879d-46fcd2c8af04","Type":"ContainerDied","Data":"cb28444a7a657de241d4093b8bf10618fff7cad1702caced5464edbd1d459b3e"} Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.189792 4925 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.309343 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-utilities\") pod \"f699881b-8684-4a6a-879d-46fcd2c8af04\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.309674 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-catalog-content\") pod \"f699881b-8684-4a6a-879d-46fcd2c8af04\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.309717 4925 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zqlm\" (UniqueName: \"kubernetes.io/projected/f699881b-8684-4a6a-879d-46fcd2c8af04-kube-api-access-7zqlm\") pod \"f699881b-8684-4a6a-879d-46fcd2c8af04\" (UID: \"f699881b-8684-4a6a-879d-46fcd2c8af04\") " Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.311104 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-utilities" (OuterVolumeSpecName: "utilities") pod "f699881b-8684-4a6a-879d-46fcd2c8af04" (UID: "f699881b-8684-4a6a-879d-46fcd2c8af04"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.315597 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f699881b-8684-4a6a-879d-46fcd2c8af04-kube-api-access-7zqlm" (OuterVolumeSpecName: "kube-api-access-7zqlm") pod "f699881b-8684-4a6a-879d-46fcd2c8af04" (UID: "f699881b-8684-4a6a-879d-46fcd2c8af04"). InnerVolumeSpecName "kube-api-access-7zqlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.337344 4925 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f699881b-8684-4a6a-879d-46fcd2c8af04" (UID: "f699881b-8684-4a6a-879d-46fcd2c8af04"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.412611 4925 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.412655 4925 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f699881b-8684-4a6a-879d-46fcd2c8af04-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.412671 4925 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zqlm\" (UniqueName: \"kubernetes.io/projected/f699881b-8684-4a6a-879d-46fcd2c8af04-kube-api-access-7zqlm\") on node \"crc\" DevicePath \"\"" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.535012 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wk565" event={"ID":"f699881b-8684-4a6a-879d-46fcd2c8af04","Type":"ContainerDied","Data":"6f0137b8bea6d0d8c2bf400c6c216e6d916b4003fd828dc84eef9cc7a4e33a68"} Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.535448 4925 scope.go:117] "RemoveContainer" containerID="cb28444a7a657de241d4093b8bf10618fff7cad1702caced5464edbd1d459b3e" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.535057 4925 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wk565" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.571002 4925 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk565"] Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.573669 4925 scope.go:117] "RemoveContainer" containerID="a7994c79436ab0cfe857cb630d7e5240f821c52760010bc69f46c41370b9c0ec" Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.580225 4925 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wk565"] Jan 21 12:18:11 crc kubenswrapper[4925]: I0121 12:18:11.594406 4925 scope.go:117] "RemoveContainer" containerID="0c89c255160387af67b2da632bdfce8a9b41514af0ae011f9f4a305fb39fe855" Jan 21 12:18:13 crc kubenswrapper[4925]: I0121 12:18:13.515435 4925 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f699881b-8684-4a6a-879d-46fcd2c8af04" path="/var/lib/kubelet/pods/f699881b-8684-4a6a-879d-46fcd2c8af04/volumes" Jan 21 12:19:49 crc kubenswrapper[4925]: I0121 12:19:49.940612 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 12:19:49 crc kubenswrapper[4925]: I0121 12:19:49.941362 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 12:20:19 crc kubenswrapper[4925]: I0121 12:20:19.941672 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 12:20:19 crc kubenswrapper[4925]: I0121 12:20:19.942380 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 12:20:49 crc kubenswrapper[4925]: I0121 12:20:49.941280 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 12:20:49 crc kubenswrapper[4925]: I0121 12:20:49.941955 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 12:20:49 crc kubenswrapper[4925]: I0121 12:20:49.942048 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 12:20:49 crc kubenswrapper[4925]: I0121 12:20:49.942717 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d8f2019edc393d954aef1b60893c29663652a31b121eed239192494936b483d9"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 12:20:49 crc kubenswrapper[4925]: I0121 12:20:49.942785 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://d8f2019edc393d954aef1b60893c29663652a31b121eed239192494936b483d9" gracePeriod=600 Jan 21 12:20:50 crc kubenswrapper[4925]: I0121 12:20:50.517602 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="d8f2019edc393d954aef1b60893c29663652a31b121eed239192494936b483d9" exitCode=0 Jan 21 12:20:50 crc kubenswrapper[4925]: I0121 12:20:50.517955 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"d8f2019edc393d954aef1b60893c29663652a31b121eed239192494936b483d9"} Jan 21 12:20:50 crc kubenswrapper[4925]: I0121 12:20:50.518065 4925 scope.go:117] "RemoveContainer" containerID="db736a59de9a9cadc877671cd6a2e54d4721edeaabdd31b2143672e9d7ab0b0a" Jan 21 12:20:51 crc kubenswrapper[4925]: I0121 12:20:51.527413 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerStarted","Data":"bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3"} Jan 21 12:23:19 crc kubenswrapper[4925]: I0121 12:23:19.941496 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 12:23:19 crc kubenswrapper[4925]: I0121 12:23:19.942311 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 12:23:49 crc kubenswrapper[4925]: I0121 12:23:49.941244 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 12:23:49 crc kubenswrapper[4925]: I0121 12:23:49.942201 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 12:24:19 crc kubenswrapper[4925]: I0121 12:24:19.941066 4925 patch_prober.go:28] interesting pod/machine-config-daemon-rzs4q container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 12:24:19 crc kubenswrapper[4925]: I0121 12:24:19.941848 4925 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 12:24:19 crc kubenswrapper[4925]: I0121 12:24:19.941957 4925 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" Jan 21 12:24:19 crc kubenswrapper[4925]: I0121 12:24:19.942867 4925 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3"} pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 12:24:19 crc kubenswrapper[4925]: I0121 12:24:19.942978 4925 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerName="machine-config-daemon" containerID="cri-o://bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" gracePeriod=600 Jan 21 12:24:21 crc kubenswrapper[4925]: I0121 12:24:21.047993 4925 generic.go:334] "Generic (PLEG): container finished" podID="f21c81eb-6979-46c3-9594-e4916d36fb0a" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" exitCode=0 Jan 21 12:24:21 crc kubenswrapper[4925]: I0121 12:24:21.048128 4925 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" 
event={"ID":"f21c81eb-6979-46c3-9594-e4916d36fb0a","Type":"ContainerDied","Data":"bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3"} Jan 21 12:24:21 crc kubenswrapper[4925]: I0121 12:24:21.048590 4925 scope.go:117] "RemoveContainer" containerID="d8f2019edc393d954aef1b60893c29663652a31b121eed239192494936b483d9" Jan 21 12:24:21 crc kubenswrapper[4925]: E0121 12:24:21.692117 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:24:22 crc kubenswrapper[4925]: I0121 12:24:22.058195 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:24:22 crc kubenswrapper[4925]: E0121 12:24:22.058546 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:24:36 crc kubenswrapper[4925]: I0121 12:24:36.502482 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:24:36 crc kubenswrapper[4925]: E0121 12:24:36.503610 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:24:48 crc kubenswrapper[4925]: I0121 12:24:48.503356 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:24:48 crc kubenswrapper[4925]: E0121 12:24:48.505491 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:25:00 crc kubenswrapper[4925]: I0121 12:25:00.501656 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:25:00 crc kubenswrapper[4925]: E0121 12:25:00.502475 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:25:12 crc kubenswrapper[4925]: 
I0121 12:25:12.501851 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:25:12 crc kubenswrapper[4925]: E0121 12:25:12.502818 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:25:24 crc kubenswrapper[4925]: I0121 12:25:24.501557 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:25:24 crc kubenswrapper[4925]: E0121 12:25:24.502445 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:25:36 crc kubenswrapper[4925]: I0121 12:25:36.502872 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:25:36 crc kubenswrapper[4925]: E0121 12:25:36.503757 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:25:51 crc kubenswrapper[4925]: I0121 12:25:51.502706 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:25:51 crc kubenswrapper[4925]: E0121 12:25:51.504872 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" Jan 21 12:26:06 crc kubenswrapper[4925]: I0121 12:26:06.503742 4925 scope.go:117] "RemoveContainer" containerID="bd4179baefff96af8d15b693f990b1d508679183a2b42807c972e7b1246951b3" Jan 21 12:26:06 crc kubenswrapper[4925]: E0121 12:26:06.504625 4925 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rzs4q_openshift-machine-config-operator(f21c81eb-6979-46c3-9594-e4916d36fb0a)\"" pod="openshift-machine-config-operator/machine-config-daemon-rzs4q" podUID="f21c81eb-6979-46c3-9594-e4916d36fb0a" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515134142757024456 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015134142760017365 
5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015134127710016506 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015134127710015456 5ustar corecore